tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

base-riscv-i.cc (11312B)


      1 // Copyright 2022 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 #include "jit/riscv64/extension/base-riscv-i.h"
      5 #include "jit/riscv64/constant/Constant-riscv64.h"
      6 #include "jit/riscv64/Assembler-riscv64.h"
      7 #include "jit/riscv64/Architecture-riscv64.h"
      8 namespace js {
      9 namespace jit {
     10 
     11 void AssemblerRISCVI::lui(Register rd, int32_t imm20) {
     12  GenInstrU(LUI, rd, imm20);
     13 }
     14 
     15 void AssemblerRISCVI::auipc(Register rd, int32_t imm20) {
     16  GenInstrU(AUIPC, rd, imm20);
     17 }
     18 
     19 // Jumps
     20 
// jal: J-type jump to pc+imm21, linking the return address into rd.
// Returns the code offset immediately after the jal instruction (recorded
// before the trailing nop is emitted) so callers can locate the return point.
CodeOffset AssemblerRISCVI::jal(Register rd, int32_t imm21) {
  GenInstrJ(JAL, rd, imm21);
  // FIXME: Pad short branches so that we don't need to care about decompression
  // when inserting veneers
  CodeOffset retAddr = CodeOffset(currentOffset());
  addi(zero, zero, 0);  // nop (addi x0, x0, 0) used as padding
  return retAddr;
}
     29 
     30 BufferOffset AssemblerRISCVI::jalr(Register rd, Register rs1, int16_t imm12) {
     31  return GenInstrI(0b000, JALR, rd, rs1, imm12);
     32 }
     33 
     34 // Branches
     35 
     36 void AssemblerRISCVI::beq(Register rs1, Register rs2, int16_t imm13) {
     37  GenInstrBranchCC_rri(0b000, rs1, rs2, imm13);
     38  // FIXME: Pad short branches so that we don't need to care about decompression
     39  // when inserting veneers
     40  addi(zero, zero, 0);
     41 }
     42 
     43 void AssemblerRISCVI::bne(Register rs1, Register rs2, int16_t imm13) {
     44  GenInstrBranchCC_rri(0b001, rs1, rs2, imm13);
     45  // FIXME: Pad short branches so that we don't need to care about decompression
     46  // when inserting veneers
     47  addi(zero, zero, 0);
     48 }
     49 
     50 void AssemblerRISCVI::blt(Register rs1, Register rs2, int16_t imm13) {
     51  GenInstrBranchCC_rri(0b100, rs1, rs2, imm13);
     52  // FIXME: Pad short branches so that we don't need to care about decompression
     53  // when inserting veneers
     54  addi(zero, zero, 0);
     55 }
     56 
     57 void AssemblerRISCVI::bge(Register rs1, Register rs2, int16_t imm13) {
     58  GenInstrBranchCC_rri(0b101, rs1, rs2, imm13);
     59  // FIXME: Pad short branches so that we don't need to care about decompression
     60  // when inserting veneers
     61  addi(zero, zero, 0);
     62 }
     63 
     64 void AssemblerRISCVI::bltu(Register rs1, Register rs2, int16_t imm13) {
     65  GenInstrBranchCC_rri(0b110, rs1, rs2, imm13);
     66  // FIXME: Pad short branches so that we don't need to care about decompression
     67  // when inserting veneers
     68  addi(zero, zero, 0);
     69 }
     70 
     71 void AssemblerRISCVI::bgeu(Register rs1, Register rs2, int16_t imm13) {
     72  GenInstrBranchCC_rri(0b111, rs1, rs2, imm13);
     73  // FIXME: Pad short branches so that we don't need to care about decompression
     74  // when inserting veneers
     75  addi(zero, zero, 0);
     76 }
     77 
     78 // Loads
     79 
     80 void AssemblerRISCVI::lb(Register rd, Register rs1, int16_t imm12) {
     81  GenInstrLoad_ri(0b000, rd, rs1, imm12);
     82 }
     83 
     84 void AssemblerRISCVI::lh(Register rd, Register rs1, int16_t imm12) {
     85  GenInstrLoad_ri(0b001, rd, rs1, imm12);
     86 }
     87 
     88 void AssemblerRISCVI::lw(Register rd, Register rs1, int16_t imm12) {
     89  GenInstrLoad_ri(0b010, rd, rs1, imm12);
     90 }
     91 
     92 void AssemblerRISCVI::lbu(Register rd, Register rs1, int16_t imm12) {
     93  GenInstrLoad_ri(0b100, rd, rs1, imm12);
     94 }
     95 
     96 void AssemblerRISCVI::lhu(Register rd, Register rs1, int16_t imm12) {
     97  GenInstrLoad_ri(0b101, rd, rs1, imm12);
     98 }
     99 
    100 // Stores
    101 
    102 void AssemblerRISCVI::sb(Register source, Register base, int16_t imm12) {
    103  GenInstrStore_rri(0b000, base, source, imm12);
    104 }
    105 
    106 void AssemblerRISCVI::sh(Register source, Register base, int16_t imm12) {
    107  GenInstrStore_rri(0b001, base, source, imm12);
    108 }
    109 
    110 void AssemblerRISCVI::sw(Register source, Register base, int16_t imm12) {
    111  GenInstrStore_rri(0b010, base, source, imm12);
    112 }
    113 
    114 // Arithmetic with immediate
    115 
    116 void AssemblerRISCVI::addi(Register rd, Register rs1, int16_t imm12) {
    117  GenInstrALU_ri(0b000, rd, rs1, imm12);
    118 }
    119 
    120 void AssemblerRISCVI::slti(Register rd, Register rs1, int16_t imm12) {
    121  GenInstrALU_ri(0b010, rd, rs1, imm12);
    122 }
    123 
    124 void AssemblerRISCVI::sltiu(Register rd, Register rs1, int16_t imm12) {
    125  GenInstrALU_ri(0b011, rd, rs1, imm12);
    126 }
    127 
    128 void AssemblerRISCVI::xori(Register rd, Register rs1, int16_t imm12) {
    129  GenInstrALU_ri(0b100, rd, rs1, imm12);
    130 }
    131 
    132 void AssemblerRISCVI::ori(Register rd, Register rs1, int16_t imm12) {
    133  GenInstrALU_ri(0b110, rd, rs1, imm12);
    134 }
    135 
    136 void AssemblerRISCVI::andi(Register rd, Register rs1, int16_t imm12) {
    137  GenInstrALU_ri(0b111, rd, rs1, imm12);
    138 }
    139 
    140 void AssemblerRISCVI::slli(Register rd, Register rs1, uint8_t shamt) {
    141  GenInstrShift_ri(0, 0b001, rd, rs1, shamt & 0x3f);
    142 }
    143 
    144 void AssemblerRISCVI::srli(Register rd, Register rs1, uint8_t shamt) {
    145  GenInstrShift_ri(0, 0b101, rd, rs1, shamt & 0x3f);
    146 }
    147 
    148 void AssemblerRISCVI::srai(Register rd, Register rs1, uint8_t shamt) {
    149  GenInstrShift_ri(1, 0b101, rd, rs1, shamt & 0x3f);
    150 }
    151 
    152 // Arithmetic
    153 
    154 void AssemblerRISCVI::add(Register rd, Register rs1, Register rs2) {
    155  GenInstrALU_rr(0b0000000, 0b000, rd, rs1, rs2);
    156 }
    157 
    158 void AssemblerRISCVI::sub(Register rd, Register rs1, Register rs2) {
    159  GenInstrALU_rr(0b0100000, 0b000, rd, rs1, rs2);
    160 }
    161 
    162 void AssemblerRISCVI::sll(Register rd, Register rs1, Register rs2) {
    163  GenInstrALU_rr(0b0000000, 0b001, rd, rs1, rs2);
    164 }
    165 
    166 void AssemblerRISCVI::slt(Register rd, Register rs1, Register rs2) {
    167  GenInstrALU_rr(0b0000000, 0b010, rd, rs1, rs2);
    168 }
    169 
    170 void AssemblerRISCVI::sltu(Register rd, Register rs1, Register rs2) {
    171  GenInstrALU_rr(0b0000000, 0b011, rd, rs1, rs2);
    172 }
    173 
    174 void AssemblerRISCVI::xor_(Register rd, Register rs1, Register rs2) {
    175  GenInstrALU_rr(0b0000000, 0b100, rd, rs1, rs2);
    176 }
    177 
    178 void AssemblerRISCVI::srl(Register rd, Register rs1, Register rs2) {
    179  GenInstrALU_rr(0b0000000, 0b101, rd, rs1, rs2);
    180 }
    181 
    182 void AssemblerRISCVI::sra(Register rd, Register rs1, Register rs2) {
    183  GenInstrALU_rr(0b0100000, 0b101, rd, rs1, rs2);
    184 }
    185 
    186 void AssemblerRISCVI::or_(Register rd, Register rs1, Register rs2) {
    187  GenInstrALU_rr(0b0000000, 0b110, rd, rs1, rs2);
    188 }
    189 
    190 void AssemblerRISCVI::and_(Register rd, Register rs1, Register rs2) {
    191  GenInstrALU_rr(0b0000000, 0b111, rd, rs1, rs2);
    192 }
    193 
    194 // Memory fences
    195 
    196 void AssemblerRISCVI::fence(uint8_t pred, uint8_t succ) {
    197  MOZ_ASSERT(is_uint4(pred) && is_uint4(succ));
    198  uint16_t imm12 = succ | (pred << 4) | (0b0000 << 8);
    199  GenInstrI(0b000, MISC_MEM, ToRegister(0UL), ToRegister(0UL), imm12);
    200 }
    201 
    202 void AssemblerRISCVI::fence_tso() {
    203  uint16_t imm12 = (0b0011) | (0b0011 << 4) | (0b1000 << 8);
    204  GenInstrI(0b000, MISC_MEM, ToRegister(0UL), ToRegister(0UL), imm12);
    205 }
    206 
    207 // Environment call / break
    208 
    209 void AssemblerRISCVI::ecall() {
    210  GenInstrI(0b000, SYSTEM, ToRegister(0UL), ToRegister(0UL), 0);
    211 }
    212 
    213 void AssemblerRISCVI::ebreak() {
    214  GenInstrI(0b000, SYSTEM, ToRegister(0UL), ToRegister(0UL), 1);
    215 }
    216 
    217 // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
    218 // instruction (i.e., it should always trap, if your implementation has invalid
    219 // instruction traps).
    220 void AssemblerRISCVI::unimp() {
    221  GenInstrI(0b001, SYSTEM, ToRegister(0), ToRegister(0), 0b110000000000);
    222 }
    223 
    224 bool AssemblerRISCVI::IsBranch(Instr instr) {
    225  return (instr & kBaseOpcodeMask) == BRANCH;
    226 }
    227 
    228 bool AssemblerRISCVI::IsJump(Instr instr) {
    229  int Op = instr & kBaseOpcodeMask;
    230  return Op == JAL || Op == JALR;
    231 }
    232 
    233 bool AssemblerRISCVI::IsNop(Instr instr) { return instr == kNopByte; }
    234 
    235 bool AssemblerRISCVI::IsJal(Instr instr) {
    236  return (instr & kBaseOpcodeMask) == JAL;
    237 }
    238 
    239 bool AssemblerRISCVI::IsJalr(Instr instr) {
    240  return (instr & kBaseOpcodeMask) == JALR;
    241 }
    242 
    243 bool AssemblerRISCVI::IsLui(Instr instr) {
    244  return (instr & kBaseOpcodeMask) == LUI;
    245 }
    246 bool AssemblerRISCVI::IsAuipc(Instr instr) {
    247  return (instr & kBaseOpcodeMask) == AUIPC;
    248 }
    249 bool AssemblerRISCVI::IsAddi(Instr instr) {
    250  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDI;
    251 }
    252 bool AssemblerRISCVI::IsOri(Instr instr) {
    253  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ORI;
    254 }
    255 bool AssemblerRISCVI::IsSlli(Instr instr) {
    256  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_SLLI;
    257 }
    258 
// Reassemble the signed 21-bit jump offset from a J-type instruction,
// where it is scattered as imm[20|10:1|11|19:12] across bits [31:12].
int AssemblerRISCVI::JumpOffset(Instr instr) {
  int32_t imm21 = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) |
                  (instr & 0xff000) | ((instr & 0x80000000) >> 11);
  // Sign-extend from bit 20; bit 0 of the offset is always zero.
  imm21 = imm21 << 11 >> 11;
  return imm21;
}
    265 
    266 int AssemblerRISCVI::JalrOffset(Instr instr) {
    267  MOZ_ASSERT(IsJalr(instr));
    268  int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
    269  return imm12;
    270 }
    271 
    272 int AssemblerRISCVI::AuipcOffset(Instr instr) {
    273  MOZ_ASSERT(IsAuipc(instr));
    274  int32_t imm20 = static_cast<int32_t>(instr & kImm20Mask);
    275  return imm20;
    276 }
    277 
    278 bool AssemblerRISCVI::IsLw(Instr instr) {
    279  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LW;
    280 }
    281 
    282 int AssemblerRISCVI::LoadOffset(Instr instr) {
    283 #if JS_CODEGEN_RISCV64
    284  MOZ_ASSERT(IsLd(instr));
    285 #elif V8_TARGET_ARCH_RISCV32
    286  MOZ_ASSERT(IsLw(instr));
    287 #endif
    288  int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
    289  return imm12;
    290 }
    291 
    292 #ifdef JS_CODEGEN_RISCV64
    293 
    294 bool AssemblerRISCVI::IsAddiw(Instr instr) {
    295  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDIW;
    296 }
    297 
    298 bool AssemblerRISCVI::IsLd(Instr instr) {
    299  return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LD;
    300 }
    301 
    302 void AssemblerRISCVI::lwu(Register rd, Register rs1, int16_t imm12) {
    303  GenInstrLoad_ri(0b110, rd, rs1, imm12);
    304 }
    305 
    306 void AssemblerRISCVI::ld(Register rd, Register rs1, int16_t imm12) {
    307  GenInstrLoad_ri(0b011, rd, rs1, imm12);
    308 }
    309 
    310 void AssemblerRISCVI::sd(Register source, Register base, int16_t imm12) {
    311  GenInstrStore_rri(0b011, base, source, imm12);
    312 }
    313 
    314 void AssemblerRISCVI::addiw(Register rd, Register rs1, int16_t imm12) {
    315  GenInstrI(0b000, OP_IMM_32, rd, rs1, imm12);
    316 }
    317 
    318 void AssemblerRISCVI::slliw(Register rd, Register rs1, uint8_t shamt) {
    319  GenInstrShiftW_ri(0, 0b001, rd, rs1, shamt & 0x1f);
    320 }
    321 
    322 void AssemblerRISCVI::srliw(Register rd, Register rs1, uint8_t shamt) {
    323  GenInstrShiftW_ri(0, 0b101, rd, rs1, shamt & 0x1f);
    324 }
    325 
    326 void AssemblerRISCVI::sraiw(Register rd, Register rs1, uint8_t shamt) {
    327  GenInstrShiftW_ri(1, 0b101, rd, rs1, shamt & 0x1f);
    328 }
    329 
    330 void AssemblerRISCVI::addw(Register rd, Register rs1, Register rs2) {
    331  GenInstrALUW_rr(0b0000000, 0b000, rd, rs1, rs2);
    332 }
    333 
    334 void AssemblerRISCVI::subw(Register rd, Register rs1, Register rs2) {
    335  GenInstrALUW_rr(0b0100000, 0b000, rd, rs1, rs2);
    336 }
    337 
    338 void AssemblerRISCVI::sllw(Register rd, Register rs1, Register rs2) {
    339  GenInstrALUW_rr(0b0000000, 0b001, rd, rs1, rs2);
    340 }
    341 
    342 void AssemblerRISCVI::srlw(Register rd, Register rs1, Register rs2) {
    343  GenInstrALUW_rr(0b0000000, 0b101, rd, rs1, rs2);
    344 }
    345 
    346 void AssemblerRISCVI::sraw(Register rd, Register rs1, Register rs2) {
    347  GenInstrALUW_rr(0b0100000, 0b101, rd, rs1, rs2);
    348 }
    349 
    350 #endif
    351 
// Reassemble the signed 13-bit branch offset from a B-type instruction.
int AssemblerRISCVI::BranchOffset(Instr instr) {
  // | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode |
  //  31          25                      11          7
  int32_t imm13 = ((instr & 0xf00) >> 7) | ((instr & 0x7e000000) >> 20) |
                  ((instr & 0x80) << 4) | ((instr & 0x80000000) >> 19);
  // Sign-extend from bit 12; bit 0 of the offset is always zero.
  imm13 = imm13 << 19 >> 19;
  return imm13;
}
    360 
// Total offset encoded by an auipc + I-type instruction pair (long branch):
// the auipc supplies the upper bits and the I-type instruction the
// sign-extended low 12 bits. Asserts the pair is well-formed and that the
// I-type instruction consumes the register the auipc wrote.
// (Name keeps the historical "Brachlong" spelling used by callers.)
int AssemblerRISCVI::BrachlongOffset(Instr auipc, Instr instr_I) {
  MOZ_ASSERT(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
             InstructionBase::kIType);
  MOZ_ASSERT(IsAuipc(auipc));
  // rd of the auipc must equal rs1 of the I-type instruction.
  MOZ_ASSERT(((auipc & kRdFieldMask) >> kRdShift) ==
             ((instr_I & kRs1FieldMask) >> kRs1Shift));
  int32_t imm_auipc = AuipcOffset(auipc);
  // imm12 occupies bits [31:20]; the arithmetic shift sign-extends it.
  int32_t imm12 = static_cast<int32_t>(instr_I & kImm12Mask) >> 20;
  int32_t offset = imm12 + imm_auipc;
  return offset;
}
    372 
    373 }  // namespace jit
    374 }  // namespace js