tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Assembler-riscv64.cpp (61261B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      8 // All Rights Reserved.
      9 //
     10 // Redistribution and use in source and binary forms, with or without
     11 // modification, are permitted provided that the following conditions are
     12 // met:
     13 //
     14 // - Redistributions of source code must retain the above copyright notice,
     15 // this list of conditions and the following disclaimer.
     16 //
     17 // - Redistribution in binary form must reproduce the above copyright
     18 // notice, this list of conditions and the following disclaimer in the
     19 // documentation and/or other materials provided with the distribution.
     20 //
     21 // - Neither the name of Sun Microsystems or the names of contributors may
     22 // be used to endorse or promote products derived from this software without
     23 // specific prior written permission.
     24 //
     25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
     26 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     27 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
     29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
     30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
     32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
     33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
     34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     36 
     37 // The original source code covered by the above license above has been
     38 // modified significantly by Google Inc.
     39 // Copyright 2021 the V8 project authors. All rights reserved.
     40 #include "jit/riscv64/Assembler-riscv64.h"
     41 
     42 #include "mozilla/DebugOnly.h"
     43 #include "mozilla/Maybe.h"
     44 
     45 #include "gc/Marking.h"
     46 #include "jit/AutoWritableJitCode.h"
     47 #include "jit/ExecutableAllocator.h"
     48 #include "jit/riscv64/disasm/Disasm-riscv64.h"
     49 #include "vm/Realm.h"
     50 #include "wasm/WasmFrame.h"
     51 
     52 #if defined(__linux__) && !defined(JS_SIMULATOR_RISCV64)
     53 #  include <sys/syscall.h>
     54 #  if __has_include(<asm/hwprobe.h>)
     55 #    include <asm/hwprobe.h>
     56 #  endif
     57 #endif
     58 
     59 using mozilla::DebugOnly;
     60 namespace js {
     61 namespace jit {
     62 
// static
void RVFlags::Init() {
  // Must run exactly once, before any extension flag is queried.
  MOZ_ASSERT(!sComputed);
#if defined(__linux__) && !defined(JS_SIMULATOR_RISCV64) && \
    __has_include(<asm/hwprobe.h>)
  // Ask the kernel which IMA extensions the hardware supports via the
  // riscv_hwprobe syscall. If the syscall fails, both flags remain false.
  riscv_hwprobe probe[1] = {{RISCV_HWPROBE_KEY_IMA_EXT_0, 0}};
  if (syscall(__NR_riscv_hwprobe, probe, 1, 0, nullptr, 0) == 0) {
    if (probe[0].value & RISCV_HWPROBE_EXT_ZBA) {
      sZbaExtension = true;
    }
    if (probe[0].value & RISCV_HWPROBE_EXT_ZBB) {
      sZbbExtension = true;
    }
  }
#else
  if (getenv("RISCV_EXT_ZBA")) {
    // Force on Zba extension for testing purposes or on non-linux platforms.
    sZbaExtension = true;
  }
  if (getenv("RISCV_EXT_ZBB")) {
    // Force on Zbb extension for testing purposes or on non-linux platforms.
    sZbbExtension = true;
  }
#endif

  sComputed = true;
}
     90 
// Placeholder for RISC-V code paths that have not been implemented yet.
#define UNIMPLEMENTED_RISCV() MOZ_CRASH("RISC_V not implemented");

// When true, disassembleInstr() traces every instruction it is handed,
// even when no spew channel was requested.
bool Assembler::FLAG_riscv_debug = false;
     94 
     95 void Assembler::nop() { addi(ToRegister(0), ToRegister(0), 0); }
     96 
     97 // Size of the instruction stream, in bytes.
     98 size_t Assembler::size() const { return m_buffer.size(); }
     99 
    100 bool Assembler::swapBuffer(wasm::Bytes& bytes) {
    101  // For now, specialize to the one use case. As long as wasm::Bytes is a
    102  // Vector, not a linked-list of chunks, there's not much we can do other
    103  // than copy.
    104  MOZ_ASSERT(bytes.empty());
    105  if (!bytes.resize(bytesNeeded())) {
    106    return false;
    107  }
    108  m_buffer.executableCopy(bytes.begin());
    109  return true;
    110 }
    111 
// Size of the jump relocation table, in bytes.
size_t Assembler::jumpRelocationTableBytes() const {
  return jumpRelocations_.length();
}
    116 
// Size of the data relocation table, in bytes.
size_t Assembler::dataRelocationTableBytes() const {
  return dataRelocations_.length();
}
// Total bytes required to serialize this assembler: the instruction stream
// followed by both relocation tables.
size_t Assembler::bytesNeeded() const {
  return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
}
    124 
// Copy the finished instruction stream into `buffer`. Only valid once
// assembly is complete.
void Assembler::executableCopy(uint8_t* buffer) {
  MOZ_ASSERT(isFinished);
  m_buffer.executableCopy(buffer);
}
    129 
    130 uint32_t Assembler::AsmPoolMaxOffset = 1024;
    131 
    132 uint32_t Assembler::GetPoolMaxOffset() {
    133  static bool isSet = false;
    134  if (!isSet) {
    135    char* poolMaxOffsetStr = getenv("ASM_POOL_MAX_OFFSET");
    136    uint32_t poolMaxOffset;
    137    if (poolMaxOffsetStr &&
    138        sscanf(poolMaxOffsetStr, "%u", &poolMaxOffset) == 1) {
    139      AsmPoolMaxOffset = poolMaxOffset;
    140    }
    141    isSet = true;
    142  }
    143  return AsmPoolMaxOffset;
    144 }
    145 
    146 // Pool callbacks stuff:
    147 void Assembler::InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
    148  MOZ_CRASH("Unimplement");
    149 }
    150 
    151 void Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
    152  MOZ_CRASH("Unimplement");
    153 }
    154 
    155 void Assembler::processCodeLabels(uint8_t* rawCode) {
    156  for (const CodeLabel& label : codeLabels_) {
    157    Bind(rawCode, label);
    158  }
    159 }
    160 
// Write the guard instruction in front of a constant pool: a JAL (no link,
// since the rd field of the base encoding is left at zero) that jumps from
// `branch` to `afterPool`, so straight-line execution skips the pool data.
void Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest,
                               BufferOffset afterPool) {
  DEBUG_PRINTF("\tWritePoolGuard\n");
  // Start from a JAL encoding with a zero immediate field.
  Instr jal = JAL | (0 & kImm20Mask);
  // Patch in the pc-relative distance from the guard to just past the pool.
  jal = SetJalOffset(branch.getOffset(), afterPool.getOffset(), jal);
  dest->SetInstructionBits(jal);
  DEBUG_PRINTF("%p(%x): ", dest, branch.getOffset());
  // NOTE(review): JitSpew_Codegen is passed where a bool is expected; it
  // converts to true and enables spew — confirm this is intentional.
  disassembleInstr(dest->InstructionBits(), JitSpew_Codegen);
}
    170 
// Write the one-instruction header that precedes a constant pool's data.
void Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural) {
  // The header must occupy exactly one 32-bit instruction slot.
  static_assert(sizeof(PoolHeader) == 4);

  // Get the total size of the pool, header included.
  const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize();
  const uintptr_t totalPoolInstructions = totalPoolSize / kInstrSize;

  // Pool size must be instruction-aligned and fit the header's 15-bit field.
  MOZ_ASSERT((totalPoolSize & 0x3) == 0);
  MOZ_ASSERT(totalPoolInstructions < (1 << 15));

  PoolHeader header(totalPoolInstructions, isNatural);
  *(PoolHeader*)start = header;
}
    184 
    185 void Assembler::copyJumpRelocationTable(uint8_t* dest) {
    186  if (jumpRelocations_.length()) {
    187    memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
    188  }
    189 }
    190 
    191 void Assembler::copyDataRelocationTable(uint8_t* dest) {
    192  if (dataRelocations_.length()) {
    193    memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
    194  }
    195 }
    196 
    197 void Assembler::RV_li(Register rd, int64_t imm) {
    198  UseScratchRegisterScope temps(this);
    199  if (RecursiveLiCount(imm) > GeneralLiCount(imm, temps.hasAvailable())) {
    200    GeneralLi(rd, imm);
    201  } else {
    202    RecursiveLi(rd, imm);
    203  }
    204 }
    205 
    206 int Assembler::RV_li_count(int64_t imm, bool is_get_temp_reg) {
    207  if (RecursiveLiCount(imm) > GeneralLiCount(imm, is_get_temp_reg)) {
    208    return GeneralLiCount(imm, is_get_temp_reg);
    209  } else {
    210    return RecursiveLiCount(imm);
    211  }
    212 }
    213 
void Assembler::GeneralLi(Register rd, int64_t imm) {
  // 64-bit imm is put in the register rd.
  // In most cases the imm is 32 bit and 2 instructions are generated. If a
  // temporary register is available, in the worst case, 6 instructions are
  // generated for a full 64-bit immediate. If a temporary register is not
  // available the maximum will be 8 instructions. If imm is more than 32 bits
  // and a temp register is available, imm is divided into two 32-bit parts,
  // low_32 and up_32. Each part is built in a separate register. low_32 is
  // built before up_32. If low_32 is negative (upper 32 bits are 1), 0xffffffff
  // is subtracted from up_32 before up_32 is built. This compensates for 32
  // bits of 1's in the lower when the two registers are added. If no temp is
  // available, the upper 32 bit is built in rd, and the lower 32 bits are
  // divided into 3 parts (11, 11, and 10 bits). The parts are shifted and
  // added to the upper part built in rd.
  if (is_int32(imm + 0x800)) {
    // 32-bit case. Maximum of 2 instructions generated.
    // The +0x800 accounts for addi's sign-extended 12-bit immediate.
    int64_t high_20 = ((imm + 0x800) >> 12);
    // Sign-extend the low 12 bits via shift left then arithmetic shift right.
    int64_t low_12 = imm << 52 >> 52;
    if (high_20) {
      lui(rd, (int32_t)high_20);
      if (low_12) {
        addi(rd, rd, low_12);
      }
    } else {
      addi(rd, zero_reg, low_12);
    }
    return;
  } else {
    UseScratchRegisterScope temps(this);
    // Up to 8 instructions are emitted; keep the pool from splitting them.
    BlockTrampolinePoolScope block_trampoline_pool(this, 8);
    // 64-bit case: divide imm into two 32-bit parts, upper and lower.
    int64_t up_32 = imm >> 32;
    int64_t low_32 = imm & 0xffffffffull;
    Register temp_reg = rd;
    // Check if a temporary register is available.
    if (up_32 == 0 || low_32 == 0) {
      // No temp register is needed.
    } else {
      temp_reg = temps.hasAvailable() ? temps.Acquire() : InvalidReg;
    }
    if (temp_reg != InvalidReg) {
      // Keep track of hardware behavior for the lower part in sim_low.
      int64_t sim_low = 0;
      // Build lower part.
      if (low_32 != 0) {
        int64_t high_20 = ((low_32 + 0x800) >> 12);
        int64_t low_12 = low_32 & 0xfff;
        if (high_20) {
          // Adjust to 20 bits for the case of overflow.
          high_20 &= 0xfffff;
          sim_low = ((high_20 << 12) << 32) >> 32;
          lui(rd, (int32_t)high_20);
          if (low_12) {
            sim_low += (low_12 << 52 >> 52) | low_12;
            addi(rd, rd, low_12);
          }
        } else {
          sim_low = low_12;
          ori(rd, zero_reg, low_12);
        }
      }
      if (sim_low & 0x100000000) {
        // Bit 31 is 1. Either an overflow or a negative 64 bit.
        if (up_32 == 0) {
          // Positive number, but overflow because of the add 0x800.
          // Clear the sign-extended upper bits with a shift pair.
          slli(rd, rd, 32);
          srli(rd, rd, 32);
          return;
        }
        // low_32 is a negative 64 bit after the build.
        up_32 = (up_32 - 0xffffffff) & 0xffffffff;
      }
      if (up_32 == 0) {
        return;
      }
      // Build upper part in a temporary register.
      if (low_32 == 0) {
        // Build upper part in rd.
        temp_reg = rd;
      }
      int64_t high_20 = (up_32 + 0x800) >> 12;
      int64_t low_12 = up_32 & 0xfff;
      if (high_20) {
        // Adjust to 20 bits for the case of overflow.
        high_20 &= 0xfffff;
        lui(temp_reg, (int32_t)high_20);
        if (low_12) {
          addi(temp_reg, temp_reg, low_12);
        }
      } else {
        ori(temp_reg, zero_reg, low_12);
      }
      // Put it at the beginning of the register.
      slli(temp_reg, temp_reg, 32);
      if (low_32 != 0) {
        add(rd, rd, temp_reg);
      }
      return;
    }
    // No temp register. Build imm in rd.
    // Build upper 32 bits first in rd. Divide lower 32 bits parts and add
    // parts to the upper part by doing shift and add.
    // First build upper part in rd.
    int64_t high_20 = (up_32 + 0x800) >> 12;
    int64_t low_12 = up_32 & 0xfff;
    if (high_20) {
      // Adjust to 20 bits for the case of overflow.
      high_20 &= 0xfffff;
      lui(rd, (int32_t)high_20);
      if (low_12) {
        addi(rd, rd, low_12);
      }
    } else {
      ori(rd, zero_reg, low_12);
    }
    // Upper part already in rd. Each part to be added to rd, has maximum of 11
    // bits, and always starts with a 1. rd is shifted by the size of the part
    // plus the number of zeros between the parts. Each part is added after the
    // left shift.
    uint32_t mask = 0x80000000;
    int32_t shift_val = 0;
    int32_t i;
    for (i = 0; i < 32; i++) {
      if ((low_32 & mask) == 0) {
        mask >>= 1;
        shift_val++;
        if (i == 31) {
          // Rest is zero.
          slli(rd, rd, shift_val);
        }
        continue;
      }
      // The first 1 seen.
      int32_t part;
      if ((i + 11) < 32) {
        // Pick 11 bits.
        part = ((uint32_t)(low_32 << i) >> i) >> (32 - (i + 11));
        slli(rd, rd, shift_val + 11);
        ori(rd, rd, part);
        i += 10;
        mask >>= 11;
      } else {
        part = (uint32_t)(low_32 << i) >> i;
        slli(rd, rd, shift_val + (32 - i));
        ori(rd, rd, part);
        break;
      }
      shift_val = 0;
    }
  }
}
    365 
// Count, without emitting anything, exactly how many instructions GeneralLi
// would generate for `imm`. The control flow below must mirror GeneralLi
// branch-for-branch; each count++ corresponds to one emitted instruction.
int Assembler::GeneralLiCount(int64_t imm, bool is_get_temp_reg) {
  int count = 0;
  // Imitate Assembler::RV_li / GeneralLi.
  if (is_int32(imm + 0x800)) {
    // 32-bit case. Maximum of 2 instructions generated.
    int64_t high_20 = ((imm + 0x800) >> 12);
    int64_t low_12 = imm << 52 >> 52;
    if (high_20) {
      count++;  // lui
      if (low_12) {
        count++;  // addi
      }
    } else {
      count++;  // addi
    }
    return count;
  } else {
    // 64-bit case: divide imm into two 32-bit parts, upper and lower.
    int64_t up_32 = imm >> 32;
    int64_t low_32 = imm & 0xffffffffull;
    // Check if a temporary register is available.
    if (is_get_temp_reg) {
      // Keep track of hardware behavior for the lower part in sim_low.
      int64_t sim_low = 0;
      // Build lower part.
      if (low_32 != 0) {
        int64_t high_20 = ((low_32 + 0x800) >> 12);
        int64_t low_12 = low_32 & 0xfff;
        if (high_20) {
          // Adjust to 20 bits for the case of overflow.
          high_20 &= 0xfffff;
          sim_low = ((high_20 << 12) << 32) >> 32;
          count++;  // lui
          if (low_12) {
            sim_low += (low_12 << 52 >> 52) | low_12;
            count++;  // addi
          }
        } else {
          sim_low = low_12;
          count++;  // ori
        }
      }
      if (sim_low & 0x100000000) {
        // Bit 31 is 1. Either an overflow or a negative 64 bit.
        if (up_32 == 0) {
          // Positive number, but overflow because of the add 0x800.
          count++;  // slli
          count++;  // srli
          return count;
        }
        // low_32 is a negative 64 bit after the build.
        up_32 = (up_32 - 0xffffffff) & 0xffffffff;
      }
      if (up_32 == 0) {
        return count;
      }
      int64_t high_20 = (up_32 + 0x800) >> 12;
      int64_t low_12 = up_32 & 0xfff;
      if (high_20) {
        // Adjust to 20 bits for the case of overflow.
        high_20 &= 0xfffff;
        count++;  // lui
        if (low_12) {
          count++;  // addi
        }
      } else {
        count++;  // ori
      }
      // Put it at the beginning of the register.
      count++;  // slli
      if (low_32 != 0) {
        count++;  // add
      }
      return count;
    }
    // No temp register. Build imm in rd.
    // Build upper 32 bits first in rd. Divide lower 32 bits parts and add
    // parts to the upper part by doing shift and add.
    // First build upper part in rd.
    int64_t high_20 = (up_32 + 0x800) >> 12;
    int64_t low_12 = up_32 & 0xfff;
    if (high_20) {
      // Adjust to 20 bits for the case of overflow.
      high_20 &= 0xfffff;
      count++;  // lui
      if (low_12) {
        count++;  // addi
      }
    } else {
      count++;  // ori
    }
    // Upper part already in rd. Each part to be added to rd, has maximum of 11
    // bits, and always starts with a 1. rd is shifted by the size of the part
    // plus the number of zeros between the parts. Each part is added after the
    // left shift.
    uint32_t mask = 0x80000000;
    int32_t i;
    for (i = 0; i < 32; i++) {
      if ((low_32 & mask) == 0) {
        mask >>= 1;
        if (i == 31) {
          // Rest is zero.
          count++;  // slli
        }
        continue;
      }
      // The first 1 seen.
      if ((i + 11) < 32) {
        // Pick 11 bits.
        count++;  // slli
        count++;  // ori
        i += 10;
        mask >>= 11;
      } else {
        count++;  // slli
        count++;  // ori
        break;
      }
    }
  }
  return count;
}
    488 
void Assembler::li_ptr(Register rd, int64_t imm) {
  // This fixed 6-instruction sequence is patched in place later, so no nops
  // or pools may be interleaved with it.
  m_buffer.enterNoNops();
  m_buffer.assertNoPoolAndNoNops();
  // Initialize rd with an address.
  // Pointers are 48 bits.
  // 6 fixed instructions are generated.
  DEBUG_PRINTF("li_ptr(%d, %" PRIx64 " <%" PRId64 ">)\n", ToNumber(rd), imm,
               imm);
  // Bits 52..63 of a pointer must be zero.
  MOZ_ASSERT((imm & 0xfff0000000000000ll) == 0);
  int64_t a6 = imm & 0x3f;                      // bits 0:5. 6 bits
  int64_t b11 = (imm >> 6) & 0x7ff;             // bits 6:11. 11 bits
  int64_t high_31 = (imm >> 17) & 0x7fffffff;   // 31 bits
  int64_t high_20 = ((high_31 + 0x800) >> 12);  // 19 bits
  int64_t low_12 = high_31 & 0xfff;             // 12 bits
  lui(rd, (int32_t)high_20);
  addi(rd, rd, low_12);  // 31 bits in rd.
  slli(rd, rd, 11);      // Space for next 11 bits
  ori(rd, rd, b11);      // 11 bits are put in. 42 bits in rd
  slli(rd, rd, 6);       // Space for next 6 bits
  ori(rd, rd, a6);       // 6 bits are put in. 48 bits in rd
  m_buffer.leaveNoNops();
}
    511 
// Load a full 64-bit constant with a fixed 8-instruction sequence (patchable
// in place; see UpdateLoad64Value). Each addi/addiw immediate is 12 bits and
// sign-extended, so the next-higher chunk is pre-biased by (1LL << k) to
// compensate for a possibly negative lower chunk.
void Assembler::li_constant(Register rd, int64_t imm) {
  m_buffer.enterNoNops();
  m_buffer.assertNoPoolAndNoNops();
  DEBUG_PRINTF("li_constant(%d, %" PRIx64 " <%" PRId64 ">)\n", ToNumber(rd),
               imm, imm);
  lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >>
              48);  // Bits 63:48
  addiw(rd, rd,
        (imm + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >>
            52);  // Bits 47:36
  slli(rd, rd, 12);
  addi(rd, rd, (imm + (1LL << 23) + (1LL << 11)) << 28 >> 52);  // Bits 35:24
  slli(rd, rd, 12);
  addi(rd, rd, (imm + (1LL << 11)) << 40 >> 52);  // Bits 23:12
  slli(rd, rd, 12);
  addi(rd, rd, imm << 52 >> 52);  // Bits 11:0
  m_buffer.leaveNoNops();
}
    530 
// Assign the next ABI argument of the given type: integer-class arguments go
// into a0..a(NumIntArgRegs-1), floating-point arguments into
// fa0..fa(NumFloatArgRegs-1), and anything beyond that spills to the stack.
ABIArg ABIArgGenerator::next(MIRType type) {
  switch (type) {
    case MIRType::Int32:
    case MIRType::Int64:
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData:
    case MIRType::StackResults: {
      if (intRegIndex_ == NumIntArgRegs) {
        // Out of integer registers: allocate a pointer-sized stack slot.
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uintptr_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_ + a0.encoding()));
      intRegIndex_++;
      break;
    }
    case MIRType::Float32:
    case MIRType::Double: {
      if (floatRegIndex_ == NumFloatArgRegs) {
        // Out of FP registers: stack slots are double-sized even for floats.
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(double);
        break;
      }
      current_ = ABIArg(FloatRegister(
          FloatRegisters::Encoding(floatRegIndex_ + fa0.encoding()),
          type == MIRType::Double ? FloatRegisters::Double
                                  : FloatRegisters::Single));
      floatRegIndex_++;
      break;
    }
    case MIRType::Simd128: {
      MOZ_CRASH("RISCV64 does not support simd yet.");
      break;
    }
    default:
      MOZ_CRASH("Unexpected argument type");
  }
  return current_;
}
    571 
    572 bool Assembler::oom() const {
    573  return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
    574         dataRelocations_.oom() || !enoughLabelCache_;
    575 }
    576 
// Disassemble a single instruction for debugging. Prints via DEBUG_PRINTF
// when FLAG_riscv_debug is set, and additionally to the Codegen spew channel
// when `enable_spew` is true. Returns the decoded instruction size in bytes,
// or -1 when both tracing paths are disabled.
int Assembler::disassembleInstr(Instr instr, bool enable_spew) {
  if (!FLAG_riscv_debug && !enable_spew) return -1;
  disasm::NameConverter converter;
  disasm::Disassembler disasm(converter);
  EmbeddedVector<char, 128> disasm_buffer;

  int size =
      disasm.InstructionDecode(disasm_buffer, reinterpret_cast<byte*>(&instr));
  DEBUG_PRINTF("%s\n", disasm_buffer.start());
  if (enable_spew) {
    JitSpew(JitSpew_Codegen, "%s", disasm_buffer.start());
  }
  return size;
}
    591 
// Decode the 48-bit target address out of the 6-instruction li_ptr pattern
// (lui / addi / slli 11 / ori / slli 6 / ori) starting at `pc`.
// Crashes if the instructions at `pc` do not match that pattern.
uint64_t Assembler::jumpChainTargetAddressAt(Instruction* pc) {
  Instruction* instr0 = pc;
  DEBUG_PRINTF("jumpChainTargetAddressAt: pc: 0x%p\t", instr0);
  // NOTE(review): offsets here are in bytes; Instruction* arithmetic appears
  // byte-addressed throughout this file.
  Instruction* instr1 = pc + 1 * kInstrSize;
  Instruction* instr2 = pc + 2 * kInstrSize;
  Instruction* instr3 = pc + 3 * kInstrSize;
  Instruction* instr4 = pc + 4 * kInstrSize;
  Instruction* instr5 = pc + 5 * kInstrSize;

  // Interpret instructions for address generated by li: See listing in
  // Assembler::jumpChainSetTargetValueAt() just below.
  if (IsLui(*reinterpret_cast<Instr*>(instr0)) &&
      IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
      IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
      IsOri(*reinterpret_cast<Instr*>(instr3)) &&
      IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
      IsOri(*reinterpret_cast<Instr*>(instr5))) {
    // Assemble the 64 bit value: high 31 bits from lui+addi, then two ori
    // chunks of 11 and 6 bits re-inserted below their shifts.
    int64_t addr = (int64_t)(instr0->Imm20UValue() << kImm20Shift) +
                   (int64_t)instr1->Imm12Value();
    MOZ_ASSERT(instr2->Imm12Value() == 11);
    addr <<= 11;
    addr |= (int64_t)instr3->Imm12Value();
    MOZ_ASSERT(instr4->Imm12Value() == 6);
    addr <<= 6;
    addr |= (int64_t)instr5->Imm12Value();

    DEBUG_PRINTF("addr: %" PRIx64 "\n", addr);
    return static_cast<uint64_t>(addr);
  }
  // We should never get here, force a bad address if we do.
  MOZ_CRASH("RISC-V  UNREACHABLE");
}
    625 
// Convenience overload: unwrap the ImmPtrs and forward to the
// PatchedImmPtr variant below.
void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                        ImmPtr newValue, ImmPtr expectedValue) {
  PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
                          PatchedImmPtr(expectedValue.value));
}
    631 
// Patch the 64-bit immediate-load sequence at `label` to produce `newValue`,
// first asserting (debug-only) that it currently produces `expectedValue`.
void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                        PatchedImmPtr newValue,
                                        PatchedImmPtr expectedValue) {
  Instruction* inst = (Instruction*)label.raw();

  // Extract old Value
  DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
  MOZ_ASSERT(value == uint64_t(expectedValue.value));

  // Replace with new value
  Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
}
    644 
// Decode the 64-bit immediate out of a load sequence starting at `inst0`.
// Handles both forms this port emits: the 8-instruction li_constant (Li64)
// pattern when the second instruction is addiw, otherwise the 6-instruction
// li_ptr (Li48) pattern. A leading jal is followed to the real sequence.
uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
  DEBUG_PRINTF("\tExtractLoad64Value: \tpc:%p ", inst0);
  if (IsJal(*reinterpret_cast<Instr*>(inst0))) {
    // The sequence lives at the jal's target; follow the offset (bytes).
    int offset = inst0->Imm20JValue();
    inst0 = inst0 + offset;
  }
  Instruction* instr1 = inst0 + 1 * kInstrSize;
  if (IsAddiw(*reinterpret_cast<Instr*>(instr1))) {
    // Li64
    Instruction* instr2 = inst0 + 2 * kInstrSize;
    Instruction* instr3 = inst0 + 3 * kInstrSize;
    Instruction* instr4 = inst0 + 4 * kInstrSize;
    Instruction* instr5 = inst0 + 5 * kInstrSize;
    Instruction* instr6 = inst0 + 6 * kInstrSize;
    Instruction* instr7 = inst0 + 7 * kInstrSize;
    if (IsLui(*reinterpret_cast<Instr*>(inst0)) &&
        IsAddiw(*reinterpret_cast<Instr*>(instr1)) &&
        IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
        IsAddi(*reinterpret_cast<Instr*>(instr3)) &&
        IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
        IsAddi(*reinterpret_cast<Instr*>(instr5)) &&
        IsSlli(*reinterpret_cast<Instr*>(instr6)) &&
        IsAddi(*reinterpret_cast<Instr*>(instr7))) {
      // Reassemble: 20 bits from lui, then four sign-extended 12-bit chunks,
      // each preceded by a 12-bit shift (mirrors li_constant above).
      int64_t imm = (int64_t)(inst0->Imm20UValue() << kImm20Shift) +
                    (int64_t)instr1->Imm12Value();
      MOZ_ASSERT(instr2->Imm12Value() == 12);
      imm <<= 12;
      imm += (int64_t)instr3->Imm12Value();
      MOZ_ASSERT(instr4->Imm12Value() == 12);
      imm <<= 12;
      imm += (int64_t)instr5->Imm12Value();
      MOZ_ASSERT(instr6->Imm12Value() == 12);
      imm <<= 12;
      imm += (int64_t)instr7->Imm12Value();
      DEBUG_PRINTF("imm:%" PRIx64 "\n", imm);
      return imm;
    } else {
      // Unexpected pattern: dump the whole window before crashing.
      FLAG_riscv_debug = true;
      disassembleInstr(inst0->InstructionBits());
      disassembleInstr(instr1->InstructionBits());
      disassembleInstr(instr2->InstructionBits());
      disassembleInstr(instr3->InstructionBits());
      disassembleInstr(instr4->InstructionBits());
      disassembleInstr(instr5->InstructionBits());
      disassembleInstr(instr6->InstructionBits());
      disassembleInstr(instr7->InstructionBits());
      MOZ_CRASH();
    }
  } else {
    DEBUG_PRINTF("\n");
    // Debug-only disassembly of the surrounding window (no-ops unless
    // FLAG_riscv_debug is set).
    Instruction* instrf1 = (inst0 - 1 * kInstrSize);
    Instruction* instr2 = inst0 + 2 * kInstrSize;
    Instruction* instr3 = inst0 + 3 * kInstrSize;
    Instruction* instr4 = inst0 + 4 * kInstrSize;
    Instruction* instr5 = inst0 + 5 * kInstrSize;
    Instruction* instr6 = inst0 + 6 * kInstrSize;
    Instruction* instr7 = inst0 + 7 * kInstrSize;
    disassembleInstr(instrf1->InstructionBits());
    disassembleInstr(inst0->InstructionBits());
    disassembleInstr(instr1->InstructionBits());
    disassembleInstr(instr2->InstructionBits());
    disassembleInstr(instr3->InstructionBits());
    disassembleInstr(instr4->InstructionBits());
    disassembleInstr(instr5->InstructionBits());
    disassembleInstr(instr6->InstructionBits());
    disassembleInstr(instr7->InstructionBits());
    MOZ_ASSERT(IsAddi(*reinterpret_cast<Instr*>(instr1)));
    // Li48
    return jumpChainTargetAddressAt(inst0);
  }
}
    716 
// Rewrite the 64-bit immediate-load sequence at `pc` so it produces `value`.
// Mirrors ExtractLoad64Value: a leading jal is followed first, then either
// the 8-instruction li_constant (Li64) pattern is patched in place, or the
// 6-instruction li_ptr (Li48) pattern is delegated to
// jumpChainSetTargetValueAt.
void Assembler::UpdateLoad64Value(Instruction* pc, uint64_t value) {
  DEBUG_PRINTF("\tUpdateLoad64Value: pc: %p\tvalue: %" PRIx64 "\n", pc, value);
  Instruction* instr1 = pc + 1 * kInstrSize;
  if (IsJal(*reinterpret_cast<Instr*>(pc))) {
    // Follow the jal to the actual load sequence (byte offset).
    pc = pc + pc->Imm20JValue();
    instr1 = pc + 1 * kInstrSize;
  }
  if (IsAddiw(*reinterpret_cast<Instr*>(instr1))) {
    Instruction* instr0 = pc;
    Instruction* instr2 = pc + 2 * kInstrSize;
    Instruction* instr3 = pc + 3 * kInstrSize;
    Instruction* instr4 = pc + 4 * kInstrSize;
    Instruction* instr5 = pc + 5 * kInstrSize;
    Instruction* instr6 = pc + 6 * kInstrSize;
    Instruction* instr7 = pc + 7 * kInstrSize;
    MOZ_ASSERT(IsLui(*reinterpret_cast<Instr*>(pc)) &&
               IsAddiw(*reinterpret_cast<Instr*>(instr1)) &&
               IsSlli(*reinterpret_cast<Instr*>(instr2)) &&
               IsAddi(*reinterpret_cast<Instr*>(instr3)) &&
               IsSlli(*reinterpret_cast<Instr*>(instr4)) &&
               IsAddi(*reinterpret_cast<Instr*>(instr5)) &&
               IsSlli(*reinterpret_cast<Instr*>(instr6)) &&
               IsAddi(*reinterpret_cast<Instr*>(instr7)));
    // The immediates below reproduce li_constant's chunking exactly:
    // lui(rd, (imm + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >>
    //             48);  // Bits 63:48
    // addiw(rd, rd,
    //       (imm + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >>
    //           52);  // Bits 47:36
    // slli(rd, rd, 12);
    // addi(rd, rd, (imm + (1LL << 23) + (1LL << 11)) << 28 >> 52);  // Bits
    // 35:24 slli(rd, rd, 12); addi(rd, rd, (imm + (1LL << 11)) << 40 >> 52); //
    // Bits 23:12 slli(rd, rd, 12); addi(rd, rd, imm << 52 >> 52);  // Bits 11:0
    // For the U-type lui, & 0xfff keeps opcode/rd and clears the imm20 field;
    // for the I-type addi/addiw, & 0xfffff keeps the low 20 bits and clears
    // the imm12 field before OR-ing in the new chunk.
    *reinterpret_cast<Instr*>(instr0) &= 0xfff;
    *reinterpret_cast<Instr*>(instr0) |=
        (((value + (1LL << 47) + (1LL << 35) + (1LL << 23) + (1LL << 11)) >> 48)
         << 12);
    *reinterpret_cast<Instr*>(instr1) &= 0xfffff;
    *reinterpret_cast<Instr*>(instr1) |=
        (((value + (1LL << 35) + (1LL << 23) + (1LL << 11)) << 16 >> 52) << 20);
    *reinterpret_cast<Instr*>(instr3) &= 0xfffff;
    *reinterpret_cast<Instr*>(instr3) |=
        (((value + (1LL << 23) + (1LL << 11)) << 28 >> 52) << 20);
    *reinterpret_cast<Instr*>(instr5) &= 0xfffff;
    *reinterpret_cast<Instr*>(instr5) |=
        (((value + (1LL << 11)) << 40 >> 52) << 20);
    *reinterpret_cast<Instr*>(instr7) &= 0xfffff;
    *reinterpret_cast<Instr*>(instr7) |= ((value << 52 >> 52) << 20);
    disassembleInstr(instr0->InstructionBits());
    disassembleInstr(instr1->InstructionBits());
    disassembleInstr(instr2->InstructionBits());
    disassembleInstr(instr3->InstructionBits());
    disassembleInstr(instr4->InstructionBits());
    disassembleInstr(instr5->InstructionBits());
    disassembleInstr(instr6->InstructionBits());
    disassembleInstr(instr7->InstructionBits());
    // Round-trip check: the patched sequence must decode back to `value`.
    MOZ_ASSERT(ExtractLoad64Value(pc) == value);
  } else {
    Instruction* instr0 = pc;
    Instruction* instr2 = pc + 2 * kInstrSize;
    Instruction* instr3 = pc + 3 * kInstrSize;
    Instruction* instr4 = pc + 4 * kInstrSize;
    Instruction* instr5 = pc + 5 * kInstrSize;
    Instruction* instr6 = pc + 6 * kInstrSize;
    Instruction* instr7 = pc + 7 * kInstrSize;
    // Debug-only disassembly (no-ops unless FLAG_riscv_debug is set).
    disassembleInstr(instr0->InstructionBits());
    disassembleInstr(instr1->InstructionBits());
    disassembleInstr(instr2->InstructionBits());
    disassembleInstr(instr3->InstructionBits());
    disassembleInstr(instr4->InstructionBits());
    disassembleInstr(instr5->InstructionBits());
    disassembleInstr(instr6->InstructionBits());
    disassembleInstr(instr7->InstructionBits());
    MOZ_ASSERT(IsAddi(*reinterpret_cast<Instr*>(instr1)));
    jumpChainSetTargetValueAt(pc, value);
  }
}
    793 
// Patch the 6-instruction "li" sequence at `pc`
// (lui / addi / slli 11 / ori / slli 6 / ori) so it loads the 48-bit
// `target`. The instruction layout must match WriteLoad64Instructions.
void Assembler::jumpChainSetTargetValueAt(Instruction* pc, uint64_t target) {
  DEBUG_PRINTF("\tjumpChainSetTargetValueAt: pc: %p\ttarget: %" PRIx64 "\n", pc,
               target);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  // Only 48-bit values fit this sequence; the top 16 bits must be clear.
  MOZ_ASSERT((target & 0xffff000000000000ll) == 0);
#ifdef DEBUG
  // Check we have the result from a li macro-instruction.
  Instruction* instr0 = pc;
  Instruction* instr1 = pc + 1 * kInstrSize;
  Instruction* instr3 = pc + 3 * kInstrSize;
  Instruction* instr5 = pc + 5 * kInstrSize;
  MOZ_ASSERT(IsLui(*reinterpret_cast<Instr*>(instr0)) &&
             IsAddi(*reinterpret_cast<Instr*>(instr1)) &&
             IsOri(*reinterpret_cast<Instr*>(instr3)) &&
             IsOri(*reinterpret_cast<Instr*>(instr5)));
#endif
  // Split the target the same way WriteLoad64Instructions does.
  int64_t a6 = target & 0x3f;                     // bits 0:5. 6 bits
  int64_t b11 = (target >> 6) & 0x7ff;            // bits 6:16. 11 bits
  int64_t high_31 = (target >> 17) & 0x7fffffff;  // 31 bits
  int64_t high_20 = ((high_31 + 0x800) >> 12);    // 19 bits (+ rounding carry)
  int64_t low_12 = high_31 & 0xfff;               // 12 bits
  // lui: keep opcode+rd (low 12 bits), replace the 20-bit immediate.
  *p = *p & 0xfff;
  *p = *p | ((int32_t)high_20 << 12);
  // addi: keep opcode+rd+funct3+rs1 (low 20 bits), replace the 12-bit imm.
  *(p + 1) = *(p + 1) & 0xfffff;
  *(p + 1) = *(p + 1) | ((int32_t)low_12 << 20);
  // slli by 11: make room for the next 11 bits.
  *(p + 2) = *(p + 2) & 0xfffff;
  *(p + 2) = *(p + 2) | (11 << 20);
  // ori in the middle 11 bits of the target.
  *(p + 3) = *(p + 3) & 0xfffff;
  *(p + 3) = *(p + 3) | ((int32_t)b11 << 20);
  // slli by 6: make room for the final 6 bits.
  *(p + 4) = *(p + 4) & 0xfffff;
  *(p + 4) = *(p + 4) | (6 << 20);
  // ori in the low 6 bits of the target.
  *(p + 5) = *(p + 5) & 0xfffff;
  *(p + 5) = *(p + 5) | ((int32_t)a6 << 20);
  // Verify the patched sequence decodes back to `target`.
  MOZ_ASSERT(jumpChainTargetAddressAt(pc) == target);
}
    829 
    830 void Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg,
    831                                        uint64_t value) {
    832  DEBUG_PRINTF("\tWriteLoad64Instructions\n");
    833  // Initialize rd with an address
    834  // Pointers are 48 bits
    835  // 6 fixed instructions are generated
    836  MOZ_ASSERT((value & 0xfff0000000000000ll) == 0);
    837  int64_t a6 = value & 0x3f;                     // bits 0:5. 6 bits
    838  int64_t b11 = (value >> 6) & 0x7ff;            // bits 6:11. 11 bits
    839  int64_t high_31 = (value >> 17) & 0x7fffffff;  // 31 bits
    840  int64_t high_20 = ((high_31 + 0x800) >> 12);   // 19 bits
    841  int64_t low_12 = high_31 & 0xfff;              // 12 bits
    842  Instr lui_ = LUI | (reg.code() << kRdShift) |
    843               ((int32_t)high_20 << kImm20Shift);  // lui(rd, (int32_t)high_20);
    844  *reinterpret_cast<Instr*>(inst0) = lui_;
    845 
    846  Instr addi_ =
    847      OP_IMM | (reg.code() << kRdShift) | (0b000 << kFunct3Shift) |
    848      (reg.code() << kRs1Shift) |
    849      (low_12 << kImm12Shift);  // addi(rd, rd, low_12);  // 31 bits in rd.
    850  *reinterpret_cast<Instr*>(inst0 + 1 * kInstrSize) = addi_;
    851 
    852  Instr slli_ =
    853      OP_IMM | (reg.code() << kRdShift) | (0b001 << kFunct3Shift) |
    854      (reg.code() << kRs1Shift) |
    855      (11 << kImm12Shift);  // slli(rd, rd, 11);      // Space for next 11 bis
    856  *reinterpret_cast<Instr*>(inst0 + 2 * kInstrSize) = slli_;
    857 
    858  Instr ori_b11 = OP_IMM | (reg.code() << kRdShift) | (0b110 << kFunct3Shift) |
    859                  (reg.code() << kRs1Shift) |
    860                  (b11 << kImm12Shift);  // ori(rd, rd, b11);      // 11 bits
    861                                         // are put in. 42 bit in rd
    862  *reinterpret_cast<Instr*>(inst0 + 3 * kInstrSize) = ori_b11;
    863 
    864  slli_ = OP_IMM | (reg.code() << kRdShift) | (0b001 << kFunct3Shift) |
    865          (reg.code() << kRs1Shift) |
    866          (6 << kImm12Shift);  // slli(rd, rd, 6);      // Space for next 11 bis
    867  *reinterpret_cast<Instr*>(inst0 + 4 * kInstrSize) =
    868      slli_;  // slli(rd, rd, 6);       // Space for next 6 bits
    869 
    870  Instr ori_a6 = OP_IMM | (reg.code() << kRdShift) | (0b110 << kFunct3Shift) |
    871                 (reg.code() << kRs1Shift) |
    872                 (a6 << kImm12Shift);  // ori(rd, rd, a6);       // 6 bits are
    873                                       // put in. 48 bis in rd
    874  *reinterpret_cast<Instr*>(inst0 + 5 * kInstrSize) = ori_a6;
    875  disassembleInstr((inst0 + 0 * kInstrSize)->InstructionBits());
    876  disassembleInstr((inst0 + 1 * kInstrSize)->InstructionBits());
    877  disassembleInstr((inst0 + 2 * kInstrSize)->InstructionBits());
    878  disassembleInstr((inst0 + 3 * kInstrSize)->InstructionBits());
    879  disassembleInstr((inst0 + 4 * kInstrSize)->InstructionBits());
    880  disassembleInstr((inst0 + 5 * kInstrSize)->InstructionBits());
    881  disassembleInstr((inst0 + 6 * kInstrSize)->InstructionBits());
    882  MOZ_ASSERT(ExtractLoad64Value(inst0) == value);
    883 }
    884 
// This just stomps over memory with 32 bits of raw data. Its purpose is to
// overwrite the call of JITed code with 32 bits worth of an offset. This is
// only meant to function on code that has been invalidated, so it should be
// totally safe. Since that instruction will never be executed again, an
// ICache flush should not be necessary.
    890 void Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
    891  // Raw is going to be the return address.
    892  uint32_t* raw = (uint32_t*)label.raw();
    893  // Overwrite the 4 bytes before the return address, which will
    894  // end up being the call instruction.
    895  *(raw - 1) = imm.value;
    896 }
    897 
// Point the branch-like instruction at `pos` to `target_pos`. Returns
// false when the instruction's immediate field cannot reach the target;
// true on success (or on OOM, where patching is moot).
bool Assembler::jumpChainPutTargetAt(BufferOffset pos, BufferOffset target_pos,
                                     bool trampoline) {
  if (m_buffer.oom()) {
    return true;
  }
  DEBUG_PRINTF("\tjumpChainPutTargetAt: %p (%d) to %p (%d)\n",
               reinterpret_cast<Instr*>(editSrc(pos)), pos.getOffset(),
               reinterpret_cast<Instr*>(editSrc(pos)) + target_pos.getOffset() -
                   pos.getOffset(),
               target_pos.getOffset());
  Instruction* instruction = editSrc(pos);
  Instr instr = instruction->InstructionBits();
  switch (instruction->InstructionOpcodeType()) {
    case BRANCH: {
      // Conditional branch: limited range (kBranchOffsetBits).
      if (!is_intn(pos.getOffset() - target_pos.getOffset(),
                   kBranchOffsetBits)) {
        return false;
      }
      instr = SetBranchOffset(pos.getOffset(), target_pos.getOffset(), instr);
      instr_at_put(pos, instr);
    } break;
    case JAL: {
      // Unconditional jump: limited range (kJumpOffsetBits).
      MOZ_ASSERT(IsJal(instr));
      if (!is_intn(pos.getOffset() - target_pos.getOffset(), kJumpOffsetBits)) {
        return false;
      }
      instr = SetJalOffset(pos.getOffset(), target_pos.getOffset(), instr);
      instr_at_put(pos, instr);
    } break;
    case LUI: {
      // Head of a "li" sequence: store the target's absolute address.
      jumpChainSetTargetValueAt(
          instruction, reinterpret_cast<uintptr_t>(editSrc(target_pos)));
    } break;
    case AUIPC: {
      // auipc + jalr/addi pair carrying a 32-bit pc-relative offset.
      Instr instr_auipc = instr;
      Instr instr_I =
          editSrc(BufferOffset(pos.getOffset() + 4))->InstructionBits();
      MOZ_ASSERT(IsJalr(instr_I) || IsAddi(instr_I));

      intptr_t offset = target_pos.getOffset() - pos.getOffset();
      if (is_int21(offset) && IsJalr(instr_I) && trampoline) {
        // Close enough for a single jal; degrade the pair to jal + nop.
        MOZ_ASSERT(is_int21(offset) && ((offset & 1) == 0));
        Instr instr = JAL;
        instr = SetJalOffset(pos.getOffset(), target_pos.getOffset(), instr);
        MOZ_ASSERT(IsJal(instr));
        MOZ_ASSERT(JumpOffset(instr) == offset);
        instr_at_put(pos, instr);
        instr_at_put(BufferOffset(pos.getOffset() + 4), kNopByte);
      } else {
        // Split the offset: auipc gets the high 20 bits (rounded so the
        // sign-extended low 12 bits in the I-type instruction cancel out).
        MOZ_RELEASE_ASSERT(is_int32(offset + 0x800));
        // The I-type instruction must consume the register auipc produced.
        MOZ_ASSERT(instruction->RdValue() ==
                   editSrc(BufferOffset(pos.getOffset() + 4))->Rs1Value());
        int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
        int32_t Lo12 = (int32_t)offset << 20 >> 20;

        instr_auipc = SetAuipcOffset(Hi20, instr_auipc);
        instr_at_put(pos, instr_auipc);

        const int kImm31_20Mask = ((1 << 12) - 1) << 20;
        const int kImm11_0Mask = ((1 << 12) - 1);
        instr_I = (instr_I & ~kImm31_20Mask) | ((Lo12 & kImm11_0Mask) << 20);
        instr_at_put(BufferOffset(pos.getOffset() + 4), instr_I);
      }
    } break;
    default:
      UNIMPLEMENTED_RISCV();
      break;
  }
  return true;
}
    968 
// Returned by jumpChainTargetAt when a link terminates its chain.
const int kEndOfChain = -1;
// In-instruction encoding (a zero offset) marking the end of a jump chain.
const int32_t kEndOfJumpChain = 0;
    971 
    972 int Assembler::jumpChainTargetAt(BufferOffset pos, bool is_internal) {
    973  if (oom()) {
    974    return kEndOfChain;
    975  }
    976  Instruction* instruction = editSrc(pos);
    977  Instruction* instruction2 = nullptr;
    978  if (IsAuipc(instruction->InstructionBits())) {
    979    instruction2 = editSrc(BufferOffset(pos.getOffset() + kInstrSize));
    980  }
    981  return jumpChainTargetAt(instruction, pos, is_internal, instruction2);
    982 }
    983 
// Decode the next link of a jump chain from `instruction`. Returns the
// linked buffer offset, or kEndOfChain when the encoded offset is the
// kEndOfJumpChain sentinel. `instruction2` must be the following
// instruction when the head is an auipc pair.
int Assembler::jumpChainTargetAt(Instruction* instruction, BufferOffset pos,
                                 bool is_internal, Instruction* instruction2) {
  DEBUG_PRINTF("\t jumpChainTargetAt: %p(%x)\n\t",
               reinterpret_cast<Instr*>(instruction), pos.getOffset());
  disassembleInstr(instruction->InstructionBits());
  Instr instr = instruction->InstructionBits();
  switch (instruction->InstructionOpcodeType()) {
    case BRANCH: {
      // Conditional branch: pc-relative offset from BranchOffset().
      int32_t imm13 = BranchOffset(instr);
      if (imm13 == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      }
      DEBUG_PRINTF("\t jumpChainTargetAt: %d %d\n", imm13,
                   pos.getOffset() + imm13);
      return pos.getOffset() + imm13;
    }
    case JAL: {
      // Unconditional jump: pc-relative offset from JumpOffset().
      int32_t imm21 = JumpOffset(instr);
      if (imm21 == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      }
      DEBUG_PRINTF("\t jumpChainTargetAt: %d %d\n", imm21,
                   pos.getOffset() + imm21);
      return pos.getOffset() + imm21;
    }
    case JALR: {
      // The link offset lives in the I-type immediate field (bits 31:20).
      int32_t imm12 = instr >> 20;
      if (imm12 == kEndOfJumpChain) {
        // EndOfChain sentinel is returned directly, not relative to pc or pos.
        return kEndOfChain;
      }
      DEBUG_PRINTF("\t jumpChainTargetAt: %d %d\n", imm12,
                   pos.getOffset() + imm12);
      return pos.getOffset() + imm12;
    }
    case LUI: {
      // "li" sequence: an absolute address was stored (see
      // jumpChainSetTargetValueAt); convert it back to a buffer offset via
      // its delta from this instruction's own address.
      uintptr_t imm = jumpChainTargetAddressAt(instruction);
      uintptr_t instr_address = reinterpret_cast<uintptr_t>(instruction);
      if (imm == kEndOfJumpChain) {
        return kEndOfChain;
      }
      MOZ_ASSERT(instr_address - imm < INT_MAX);
      int32_t delta = static_cast<int32_t>(instr_address - imm);
      MOZ_ASSERT(pos.getOffset() > delta);
      return pos.getOffset() - delta;
    }
    case AUIPC: {
      // auipc + jalr/addi pair: combined 32-bit pc-relative offset.
      MOZ_ASSERT(instruction2 != nullptr);
      Instr instr_auipc = instr;
      Instr instr_I = instruction2->InstructionBits();
      MOZ_ASSERT(IsJalr(instr_I) || IsAddi(instr_I));
      int32_t offset = BrachlongOffset(instr_auipc, instr_I);
      if (offset == kEndOfJumpChain) return kEndOfChain;
      DEBUG_PRINTF("\t jumpChainTargetAt: %d %d\n", offset,
                   pos.getOffset() + offset);
      return offset + pos.getOffset();
    }
    default: {
      UNIMPLEMENTED_RISCV();
    }
  }
}
   1048 
   1049 BufferOffset Assembler::jumpChainGetNextLink(BufferOffset pos,
   1050                                             bool is_internal) {
   1051  int link = jumpChainTargetAt(pos, is_internal);
   1052  return link == kEndOfChain ? BufferOffset() : BufferOffset(link);
   1053 }
   1054 
   1055 uint32_t Assembler::jumpChainUseNextLink(Label* L, bool is_internal) {
   1056  MOZ_ASSERT(L->used());
   1057  BufferOffset link = jumpChainGetNextLink(BufferOffset(L), is_internal);
   1058  if (!link.assigned()) {
   1059    L->reset();
   1060    return LabelBase::INVALID_OFFSET;
   1061  }
   1062  int offset = link.getOffset();
   1063  DEBUG_PRINTF("next: %p to offset %d\n", L, offset);
   1064  L->use(offset);
   1065  return offset;
   1066 }
   1067 
// Bind `label` to `boff` (or to the next instruction when unassigned) and
// patch every branch in the label's use chain to target the bound position.
// Branches whose immediate range cannot reach the target rely on an
// auipc+jalr trampoline further along the chain (asserted below).
void Assembler::bind(Label* label, BufferOffset boff) {
  JitSpew(JitSpew_Codegen, ".set Llabel %p %u", label, currentOffset());
  DEBUG_PRINTF(".set Llabel %p %u\n", label, currentOffset());
  // If our caller didn't give us an explicit target to bind to
  // then we want to bind to the location of the next instruction
  BufferOffset dest = boff.assigned() ? boff : nextOffset();
  if (label->used()) {
    uint32_t next;

    do {
      // A used label holds a link to branch that uses it.
      // It's okay we use it here since jumpChainUseNextLink() mutates `label`.
      BufferOffset b(label);
      DEBUG_PRINTF("\tbind next:%d\n", b.getOffset());
      // Even a 0 offset may be invalid if we're out of memory.
      if (oom()) {
        return;
      }
      int fixup_pos = b.getOffset();
      int dist = dest.getOffset() - fixup_pos;
      // Advance the chain first; `next` is the following use (or INVALID).
      next = jumpChainUseNextLink(label, false);
      DEBUG_PRINTF(
          "\t%p fixup: %d next: %u dest: %d dist: %d nextOffset: %d "
          "currOffset: %d\n",
          label, fixup_pos, next, dest.getOffset(), dist,
          nextOffset().getOffset(), currentOffset());
      Instr instr = editSrc(b)->InstructionBits();
      if (IsBranch(instr)) {
        if (!is_intn(dist, kBranchOffsetBits)) {
          // Out of conditional-branch range: the next link must be an
          // auipc+jalr trampoline that covers the distance.
          MOZ_ASSERT(next != LabelBase::INVALID_OFFSET);
          MOZ_RELEASE_ASSERT(
              is_intn(static_cast<int>(next) - fixup_pos, kJumpOffsetBits));
          MOZ_ASSERT(IsAuipc(editSrc(BufferOffset(next))->InstructionBits()));
          MOZ_ASSERT(
              IsJalr(editSrc(BufferOffset(next + 4))->InstructionBits()));
          DEBUG_PRINTF("\t\ttrampolining: %d\n", next);
        } else {
          // In range: patch directly and retire the registered deadline.
          jumpChainPutTargetAt(b, dest);
          BufferOffset deadline(b.getOffset() +
                                ImmBranchMaxForwardOffset(CondBranchRangeType));
          m_buffer.unregisterBranchDeadline(CondBranchRangeType, deadline);
        }
      } else if (IsJal(instr)) {
        if (!is_intn(dist, kJumpOffsetBits)) {
          // Out of jal range: likewise rely on a trampoline at `next`.
          MOZ_ASSERT(next != LabelBase::INVALID_OFFSET);
          MOZ_RELEASE_ASSERT(
              is_intn(static_cast<int>(next) - fixup_pos, kJumpOffsetBits));
          MOZ_ASSERT(IsAuipc(editSrc(BufferOffset(next))->InstructionBits()));
          MOZ_ASSERT(
              IsJalr(editSrc(BufferOffset(next + 4))->InstructionBits()));
          DEBUG_PRINTF("\t\ttrampolining: %d\n", next);
        } else {
          jumpChainPutTargetAt(b, dest);
          BufferOffset deadline(
              b.getOffset() + ImmBranchMaxForwardOffset(UncondBranchRangeType));
          m_buffer.unregisterBranchDeadline(UncondBranchRangeType, deadline);
        }
      } else {
        // auipc pair: 32-bit pc-relative range, patch unconditionally.
        MOZ_ASSERT(IsAuipc(instr));
        jumpChainPutTargetAt(b, dest);
      }
    } while (next != LabelBase::INVALID_OFFSET);
  }
  label->bind(dest.getOffset());
}
   1133 
   1134 void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
   1135  if (label.patchAt().bound()) {
   1136    auto mode = label.linkMode();
   1137    intptr_t offset = label.patchAt().offset();
   1138    intptr_t target = label.target().offset();
   1139 
   1140    if (mode == CodeLabel::RawPointer) {
   1141      *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
   1142    } else {
   1143      MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
   1144                 mode == CodeLabel::JumpImmediate);
   1145      Instruction* inst = (Instruction*)(rawCode + offset);
   1146      Assembler::UpdateLoad64Value(inst, (uint64_t)(rawCode + target));
   1147    }
   1148  }
   1149 }
   1150 
   1151 bool Assembler::is_near(Label* L) {
   1152  MOZ_ASSERT(L->bound());
   1153  return is_intn((currentOffset() - L->offset()), kJumpOffsetBits);
   1154 }
   1155 
   1156 bool Assembler::is_near(Label* L, OffsetSize bits) {
   1157  if (L == nullptr || !L->bound()) return true;
   1158  return is_intn((currentOffset() - L->offset()), bits);
   1159 }
   1160 
   1161 bool Assembler::is_near_branch(Label* L) {
   1162  MOZ_ASSERT(L->bound());
   1163  return is_intn((currentOffset() - L->offset()), kBranchOffsetBits);
   1164 }
   1165 
// Compute the 32-bit pc-relative offset for an auipc pair referencing `L`.
// Bound labels yield the real offset; unbound labels get this emission site
// linked into their use chain and kEndOfJumpChain as a placeholder, to be
// patched by bind().
int32_t Assembler::branchLongOffsetHelper(Label* L) {
  if (oom()) {
    return kEndOfJumpChain;
  }

  // The long branch occupies two instructions.
  BufferOffset next_instr_offset = nextInstrOffset(2);
  DEBUG_PRINTF("\tbranchLongOffsetHelper: %p to (%d)\n", L,
               next_instr_offset.getOffset());

  if (L->bound()) {
    // The label is bound: all uses are already linked.
    JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
            next_instr_offset.getOffset());
    intptr_t offset = L->offset() - next_instr_offset.getOffset();
    MOZ_ASSERT((offset & 3) == 0);
    MOZ_ASSERT(is_int32(offset));
    return static_cast<int32_t>(offset);
  }

  // The label is unbound and previously unused: Store the offset in the label
  // itself for patching by bind().
  if (!L->used()) {
    JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
            next_instr_offset.getOffset());
    L->use(next_instr_offset.getOffset());
    DEBUG_PRINTF("\tLabel %p added to link: %d\n", L,
                 next_instr_offset.getOffset());
    // Cache the chain head keyed by the label's offset for later appends.
    if (!label_cache_.putNew(L->offset(), next_instr_offset)) {
      NoEnoughLabelCache();
    }
    return kEndOfJumpChain;
  }

  // The label is already in use: look up the current chain head.
  LabelCache::Ptr p = label_cache_.lookup(L->offset());
  MOZ_ASSERT(p);
  MOZ_ASSERT(p->key() == L->offset());
  const int32_t target_pos = p->value().getOffset();

  // If the existing instruction at the head of the list is within reach of the
  // new branch, we can simply insert the new branch at the front of the list.
  if (jumpChainPutTargetAt(BufferOffset(target_pos), next_instr_offset)) {
    DEBUG_PRINTF("\tLabel %p added to link: %d\n", L,
                 next_instr_offset.getOffset());
    if (!label_cache_.put(L->offset(), next_instr_offset)) {
      NoEnoughLabelCache();
    }
  } else {
    DEBUG_PRINTF("\tLabel  %p can't be added to link: %d -> %d\n", L,
                 BufferOffset(target_pos).getOffset(),
                 next_instr_offset.getOffset());

    // The label already has a linked list of uses, but we can't reach the head
    // of the list with the allowed branch range. Insert this branch at a
    // different position in the list. We need to find an existing branch
    // `exbr`.
    //
    // In particular, the end of the list is always a viable candidate, so we'll
    // just get that.
    //
    // See also vixl::MozBaseAssembler::LinkAndGetOffsetTo.

    BufferOffset next(L);
    BufferOffset exbr;
    do {
      exbr = next;
      next = jumpChainGetNextLink(next, false);
    } while (next.assigned());
    mozilla::DebugOnly<bool> ok = jumpChainPutTargetAt(exbr, next_instr_offset);
    MOZ_ASSERT(ok, "Still can't reach list head");
  }

  return kEndOfJumpChain;
}
   1239 
// Compute the offset for a short (branch/jal) reference to `L`, whose
// immediate is `bits` wide. Bound labels yield the real offset; unbound
// labels get this site linked into the chain, a trampoline deadline
// registered, and kEndOfJumpChain as a placeholder for bind().
int32_t Assembler::branchOffsetHelper(Label* L, OffsetSize bits) {
  if (oom()) {
    return kEndOfJumpChain;
  }

  BufferOffset next_instr_offset = nextInstrOffset();
  DEBUG_PRINTF("\tbranchOffsetHelper: %p to %d\n", L,
               next_instr_offset.getOffset());

  if (L->bound()) {
    // The label is bound: all uses are already linked.
    JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
            next_instr_offset.getOffset());
    int32_t offset = L->offset() - next_instr_offset.getOffset();
    DEBUG_PRINTF("\toffset = %d\n", offset);
    MOZ_ASSERT(is_intn(offset, bits));
    MOZ_ASSERT((offset & 1) == 0);
    return offset;
  }

  // Register the farthest point this branch can still reach, so the buffer
  // can emit a trampoline before the range is exceeded.
  BufferOffset deadline(next_instr_offset.getOffset() +
                        ImmBranchMaxForwardOffset(bits));
  DEBUG_PRINTF("\tregisterBranchDeadline %d type %d\n", deadline.getOffset(),
               OffsetSizeToImmBranchRangeType(bits));
  m_buffer.registerBranchDeadline(OffsetSizeToImmBranchRangeType(bits),
                                  deadline);

  // The label is unbound and previously unused: Store the offset in the label
  // itself for patching by bind().
  if (!L->used()) {
    JitSpew(JitSpew_Codegen, ".use Llabel %p on %d", L,
            next_instr_offset.getOffset());
    L->use(next_instr_offset.getOffset());
    // Cache the chain head keyed by the label's offset for later appends.
    if (!label_cache_.putNew(L->offset(), next_instr_offset)) {
      NoEnoughLabelCache();
    }
    DEBUG_PRINTF("\tLabel  %p added to link: %d\n", L,
                 next_instr_offset.getOffset());
    return kEndOfJumpChain;
  }

  // The label is unbound and has multiple users. Create a linked list between
  // the branches, and update the linked list head in the label struct. This is
  // not always trivial since the branches in the linked list have limited
  // ranges.

  LabelCache::Ptr p = label_cache_.lookup(L->offset());
  MOZ_ASSERT(p);
  MOZ_ASSERT(p->key() == L->offset());
  const int32_t target_pos = p->value().getOffset();

  // If the existing instruction at the head of the list is within reach of the
  // new branch, we can simply insert the new branch at the front of the list.
  if (jumpChainPutTargetAt(BufferOffset(target_pos), next_instr_offset)) {
    DEBUG_PRINTF("\tLabel  %p added to link: %d\n", L,
                 next_instr_offset.getOffset());
    if (!label_cache_.put(L->offset(), next_instr_offset)) {
      NoEnoughLabelCache();
    }
  } else {
    DEBUG_PRINTF("\tLabel  %p can't be added to link: %d -> %d\n", L,
                 BufferOffset(target_pos).getOffset(),
                 next_instr_offset.getOffset());

    // The label already has a linked list of uses, but we can't reach the head
    // of the list with the allowed branch range. Insert this branch at a
    // different position in the list. We need to find an existing branch
    // `exbr`.
    //
    // In particular, the end of the list is always a viable candidate, so we'll
    // just get that.
    //
    // See also vixl::MozBaseAssembler::LinkAndGetOffsetTo.

    BufferOffset next(L);
    BufferOffset exbr;
    do {
      exbr = next;
      next = jumpChainGetNextLink(next, false);
    } while (next.assigned());
    mozilla::DebugOnly<bool> ok = jumpChainPutTargetAt(exbr, next_instr_offset);
    MOZ_ASSERT(ok, "Still can't reach list head");
  }

  return kEndOfJumpChain;
}
   1326 
   1327 Assembler::Condition Assembler::InvertCondition(Condition cond) {
   1328  switch (cond) {
   1329    case Equal:
   1330      return NotEqual;
   1331    case NotEqual:
   1332      return Equal;
   1333    case Zero:
   1334      return NonZero;
   1335    case NonZero:
   1336      return Zero;
   1337    case LessThan:
   1338      return GreaterThanOrEqual;
   1339    case LessThanOrEqual:
   1340      return GreaterThan;
   1341    case GreaterThan:
   1342      return LessThanOrEqual;
   1343    case GreaterThanOrEqual:
   1344      return LessThan;
   1345    case Above:
   1346      return BelowOrEqual;
   1347    case AboveOrEqual:
   1348      return Below;
   1349    case Below:
   1350      return AboveOrEqual;
   1351    case BelowOrEqual:
   1352      return Above;
   1353    case Signed:
   1354      return NotSigned;
   1355    case NotSigned:
   1356      return Signed;
   1357    default:
   1358      MOZ_CRASH("unexpected condition");
   1359  }
   1360 }
   1361 
   1362 Assembler::DoubleCondition Assembler::InvertCondition(DoubleCondition cond) {
   1363  switch (cond) {
   1364    case DoubleOrdered:
   1365      return DoubleUnordered;
   1366    case DoubleEqual:
   1367      return DoubleNotEqualOrUnordered;
   1368    case DoubleNotEqual:
   1369      return DoubleEqualOrUnordered;
   1370    case DoubleGreaterThan:
   1371      return DoubleLessThanOrEqualOrUnordered;
   1372    case DoubleGreaterThanOrEqual:
   1373      return DoubleLessThanOrUnordered;
   1374    case DoubleLessThan:
   1375      return DoubleGreaterThanOrEqualOrUnordered;
   1376    case DoubleLessThanOrEqual:
   1377      return DoubleGreaterThanOrUnordered;
   1378    case DoubleUnordered:
   1379      return DoubleOrdered;
   1380    case DoubleEqualOrUnordered:
   1381      return DoubleNotEqual;
   1382    case DoubleNotEqualOrUnordered:
   1383      return DoubleEqual;
   1384    case DoubleGreaterThanOrUnordered:
   1385      return DoubleLessThanOrEqual;
   1386    case DoubleGreaterThanOrEqualOrUnordered:
   1387      return DoubleLessThan;
   1388    case DoubleLessThanOrUnordered:
   1389      return DoubleGreaterThanOrEqual;
   1390    case DoubleLessThanOrEqualOrUnordered:
   1391      return DoubleGreaterThan;
   1392    default:
   1393      MOZ_CRASH("unexpected condition");
   1394  }
   1395 }
   1396 
   1397 // Break / Trap instructions.
   1398 void Assembler::break_(uint32_t code, bool break_as_stop) {
   1399  // We need to invalidate breaks that could be stops as well because the
   1400  // simulator expects a char pointer after the stop instruction.
   1401  // See constants-mips.h for explanation.
   1402  MOZ_ASSERT(
   1403      (break_as_stop && code <= kMaxStopCode && code > kMaxTracepointCode) ||
   1404      (!break_as_stop && (code > kMaxStopCode || code <= kMaxTracepointCode)));
   1405 
   1406  // since ebreak does not allow additional immediate field, we use the
   1407  // immediate field of lui instruction immediately following the ebreak to
   1408  // encode the "code" info
   1409  ebreak();
   1410  MOZ_ASSERT(is_uint20(code));
   1411  lui(zero_reg, code);
   1412 }
   1413 
// Flip a patchable "addi zero, zero, offset" placeholder into
// "jal zero, offset" (an unconditional short jump). Inverse of ToggleToCmp.
void Assembler::ToggleToJmp(CodeLocationLabel inst_) {
  Instruction* inst = (Instruction*)inst_.raw();
  MOZ_ASSERT(IsAddi(inst->InstructionBits()));
  // Recover the offset stashed in the addi's 12-bit immediate field.
  int32_t offset = inst->Imm12Value();
  MOZ_ASSERT(is_int12(offset));
  // Re-encode the offset into the scrambled J-type immediate layout
  // (imm[20|10:1|11|19:12]).
  Instr jal_ = JAL | (0b000 << kFunct3Shift) |
               (offset & 0xff000) |          // bits 19-12
               ((offset & 0x800) << 9) |     // bit  11
               ((offset & 0x7fe) << 20) |    // bits 10-1
               ((offset & 0x100000) << 11);  // bit  20
  // jal(zero, offset);
  *reinterpret_cast<Instr*>(inst) = jal_;
}
   1427 
// Flip a short "jal zero, offset" back into the "addi zero, zero, offset"
// placeholder (an effective no-op preserving the offset). Inverse of
// ToggleToJmp.
void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
  Instruction* inst = (Instruction*)inst_.raw();

  // toggledJump is always used for short jumps.
  MOZ_ASSERT(IsJal(inst->InstructionBits()));
  // Replace "jal zero_reg, offset" with "addi $zero, $zero, offset"
  int32_t offset = inst->Imm20JValue();
  // Only offsets that fit addi's 12-bit immediate can round-trip.
  MOZ_ASSERT(is_int12(offset));
  Instr addi_ = OP_IMM | (0b000 << kFunct3Shift) |
                (offset << kImm12Shift);  // addi(zero, zero, low_12);
  *reinterpret_cast<Instr*>(inst) = addi_;
}
   1440 
   1441 bool Assembler::reserve(size_t size) {
   1442  // This buffer uses fixed-size chunks so there's no point in reserving
   1443  // now vs. on-demand.
   1444  return !oom();
   1445 }
   1446 
   1447 static JitCode* CodeFromJump(Instruction* jump) {
   1448  uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
   1449  return JitCode::FromExecutable(target);
   1450 }
   1451 
   1452 void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
   1453                                     CompactBufferReader& reader) {
   1454  while (reader.more()) {
   1455    JitCode* child =
   1456        CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
   1457    TraceManuallyBarrieredEdge(trc, &child, "rel32");
   1458  }
   1459 }
   1460 
// Trace the pointer or Value embedded in the load-64 sequence at `inst`,
// and if tracing moved the referent, patch the instruction stream (making
// the code writable on first patch via `awjc`).
static void TraceOneDataRelocation(JSTracer* trc,
                                   mozilla::Maybe<AutoWritableJitCode>& awjc,
                                   JitCode* code, Instruction* inst) {
  void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
  void* prior = ptr;

  // Data relocations can be for Values or for raw pointers. If a Value is
  // zero-tagged, we can trace it as if it were a raw pointer. If a Value
  // is not zero-tagged, we have to interpret it as a Value to ensure that the
  // tag bits are masked off to recover the actual pointer.
  uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
  if (word >> JSVAL_TAG_SHIFT) {
    // This relocation is a Value with a non-zero tag.
    Value v = Value::fromRawBits(word);
    TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
    ptr = (void*)v.bitsAsPunboxPointer();
  } else {
    // This relocation is a raw pointer or a Value with a zero tag.
    // No barrier needed since these are constants.
    TraceManuallyBarrieredGenericPointerEdge(
        trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
  }

  if (ptr != prior) {
    // The referent moved: rewrite the embedded address in place.
    if (awjc.isNothing()) {
      awjc.emplace(code);
    }
    Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
  }
}
   1491 
   1492 /* static */
   1493 void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
   1494                                     CompactBufferReader& reader) {
   1495  mozilla::Maybe<AutoWritableJitCode> awjc;
   1496  while (reader.more()) {
   1497    size_t offset = reader.readUnsigned();
   1498    Instruction* inst = (Instruction*)(code->raw() + offset);
   1499    TraceOneDataRelocation(trc, awjc, code, inst);
   1500  }
   1501 }
   1502 
// Snapshot the assembler's scratch-register list so the destructor can
// restore it when this scope ends.
UseScratchRegisterScope::UseScratchRegisterScope(Assembler& assembler)
    : available_(assembler.GetScratchRegisterList()),
      old_available_(*available_) {}
   1506 
// Pointer-argument overload; identical to the reference overload above.
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : available_(assembler->GetScratchRegisterList()),
      old_available_(*available_) {}
   1510 
// Restore the scratch-register list captured at construction, implicitly
// releasing any registers acquired inside this scope.
UseScratchRegisterScope::~UseScratchRegisterScope() {
  *available_ = old_available_;
}
   1514 
   1515 Register UseScratchRegisterScope::Acquire() {
   1516  MOZ_ASSERT(available_ != nullptr);
   1517  MOZ_ASSERT(!available_->empty());
   1518  Register index = GeneralRegisterSet::FirstRegister(available_->bits());
   1519  available_->takeRegisterIndex(index);
   1520  return index;
   1521 }
   1522 
   1523 void UseScratchRegisterScope::Release(const Register& reg) {
   1524  MOZ_ASSERT(available_ != nullptr);
   1525  MOZ_ASSERT(old_available_.hasRegisterIndex(reg));
   1526  MOZ_ASSERT(!available_->hasRegisterIndex(reg));
   1527  Include(GeneralRegisterSet(1 << reg.code()));
   1528 }
   1529 
   1530 bool UseScratchRegisterScope::hasAvailable() const {
   1531  return (available_->size()) != 0;
   1532 }
   1533 
// Redirect every pending use of |label| to |target|, then reset |label|.
void Assembler::retarget(Label* label, Label* target) {
  spew("retarget %p -> %p", label, target);
  if (label->used() && !oom()) {
    if (target->bound()) {
      // Target already has a position: bind label's whole use chain there.
      bind(label, BufferOffset(target));
    } else if (target->used()) {
      // The target is not bound but used. Prepend label's branch list
      // onto target's.
      int32_t next;
      BufferOffset labelBranchOffset(label);

      // Find the head of the use chain for label.
      // NOTE(review): the terminating iteration overwrites
      // |labelBranchOffset| with BufferOffset(INVALID_OFFSET) before the
      // loop exits — presumably jumpChainPutTargetAt tolerates this, or
      // the path was never exercised; see the MOZ_CRASH below. Confirm
      // before enabling this branch.
      do {
        next = jumpChainUseNextLink(label, false);
        labelBranchOffset = BufferOffset(next);
      } while (next != LabelBase::INVALID_OFFSET);

      // Then patch the head of label's use chain to the tail of
      // target's use chain, prepending the entire use chain of target.
      target->use(label->offset());
      jumpChainPutTargetAt(labelBranchOffset, BufferOffset(target));
      // Deliberate crash: this used-but-unbound path is unverified.
      MOZ_CRASH("check");
    } else {
      // The target is unbound and unused.  We can just take the head of
      // the list hanging off of label, and dump that into target.
      target->use(label->offset());
    }
  }
  // Label's uses now live on target (or are bound); clear label.
  label->reset();
}
   1564 
   1565 bool Assembler::appendRawCode(const uint8_t* code, size_t numBytes) {
   1566  if (m_buffer.oom()) {
   1567    return false;
   1568  }
   1569  while (numBytes > SliceSize) {
   1570    m_buffer.putBytes(SliceSize, code);
   1571    numBytes -= SliceSize;
   1572    code += SliceSize;
   1573  }
   1574  m_buffer.putBytes(numBytes, code);
   1575  return !m_buffer.oom();
   1576 }
   1577 
// Enable or disable a patchable call site. The site is a six-instruction
// 64-bit address materialization (lui/addi/slli/ori/slli/ori, asserted
// below) followed by a seventh, patchable slot: "jalr ra, <addr reg>, 0"
// when the call is enabled, or a nop when it is disabled.
void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
#ifdef DEBUG
  Instruction* i0 = (Instruction*)inst_.raw();
  Instruction* i1 = (Instruction*)(inst_.raw() + 1 * kInstrSize);
  Instruction* i2 = (Instruction*)(inst_.raw() + 2 * kInstrSize);
  Instruction* i3 = (Instruction*)(inst_.raw() + 3 * kInstrSize);
  Instruction* i4 = (Instruction*)(inst_.raw() + 4 * kInstrSize);
#endif
  Instruction* i5 = (Instruction*)(inst_.raw() + 5 * kInstrSize);
  Instruction* i6 = (Instruction*)(inst_.raw() + 6 * kInstrSize);

  // Sanity-check that this really is the expected load-address sequence.
  // (MOZ_ASSERT compiles away in release builds, where i0..i4 are not
  // declared.)
  MOZ_ASSERT(IsLui(i0->InstructionBits()));
  MOZ_ASSERT(IsAddi(i1->InstructionBits()));
  MOZ_ASSERT(IsSlli(i2->InstructionBits()));
  MOZ_ASSERT(IsOri(i3->InstructionBits()));
  MOZ_ASSERT(IsSlli(i4->InstructionBits()));
  MOZ_ASSERT(IsOri(i5->InstructionBits()));

  if (enabled) {
    // Build "jalr ra, rs1, 0" where rs1 is the destination register of
    // the final ori (i5), i.e. the register holding the call target.
    Instr jalr_ = JALR | (ra.code() << kRdShift) | (0x0 << kFunct3Shift) |
                  (i5->RdValue() << kRs1Shift) | (0x0 << kImm12Shift);
    *((Instr*)i6) = jalr_;
  } else {
    // kNopByte is written as a whole Instr — presumably the 32-bit nop
    // encoding despite the "Byte" in its name; confirm against its
    // definition.
    *((Instr*)i6) = kNopByte;
  }
}
   1604 
// Called when a short-range branch to a still-unbound label is about to go
// out of range: write an auipc/jalr long-jump veneer at |veneer| and point
// the branch at it. The veneer is spliced into the label's linked list of
// uses so that binding the label later patches the veneer, not the branch.
void Assembler::PatchShortRangeBranchToVeneer(Buffer* buffer, unsigned rangeIdx,
                                              BufferOffset deadline,
                                              BufferOffset veneer) {
  if (buffer->oom()) {
    return;
  }
  DEBUG_PRINTF("\tPatchShortRangeBranchToVeneer\n");
  // Reconstruct the position of the branch from (rangeIdx, deadline).
  ImmBranchRangeType branchRange = static_cast<ImmBranchRangeType>(rangeIdx);
  BufferOffset branch(deadline.getOffset() -
                      ImmBranchMaxForwardOffset(branchRange));
  Instruction* branchInst = buffer->getInst(branch);
  // The veneer occupies two consecutive 4-byte instruction slots.
  Instruction* veneerInst_1 = buffer->getInst(veneer);
  Instruction* veneerInst_2 =
      buffer->getInst(BufferOffset(veneer.getOffset() + 4));
  // Verify that the branch range matches what's encoded.
  DEBUG_PRINTF("\t%p(%x): ", branchInst, branch.getOffset());
  disassembleInstr(branchInst->InstructionBits(), JitSpew_Codegen);
  DEBUG_PRINTF("\t insert veneer %x, branch: %x deadline: %x\n",
               veneer.getOffset(), branch.getOffset(), deadline.getOffset());
  MOZ_ASSERT(branchRange <= UncondBranchRangeType);
  MOZ_ASSERT(branchInst->GetImmBranchRangeType() == branchRange);
  // Emit a long jump slot: "auipc t6, hi20 ; jalr zero, t6, lo12"
  // (offsets filled in below).
  Instr auipc = AUIPC | (t6.code() << kRdShift) | (0x0 << kImm20Shift);
  Instr jalr = JALR | (zero_reg.code() << kRdShift) | (0x0 << kFunct3Shift) |
               (t6.code() << kRs1Shift) | (0x0 << kImm12Shift);

  // We want to insert veneer after branch in the linked list of instructions
  // that use the same unbound label.
  // The veneer should be an unconditional branch.
  int32_t nextElemOffset =
      jumpChainTargetAt(buffer->getInst(branch), branch, false);
  int32_t dist;
  // If offset is kEndOfChain, this is the end of the linked list.
  if (nextElemOffset != kEndOfChain) {
    // Make the offset relative to veneer so it targets the same instruction
    // as branchInst.
    dist = nextElemOffset - veneer.getOffset();
  } else {
    dist = kEndOfJumpChain;
  }
  // Split the displacement into auipc's upper 20 bits (rounded to absorb
  // the sign of the low part) and jalr's sign-extended low 12 bits.
  int32_t Hi20 = (((int32_t)dist + 0x800) >> 12);
  int32_t Lo12 = (int32_t)dist << 20 >> 20;
  auipc = SetAuipcOffset(Hi20, auipc);
  jalr = SetJalrOffset(Lo12, jalr);
  // insert veneer
  veneerInst_1->SetInstructionBits(auipc);
  veneerInst_2->SetInstructionBits(jalr);
  // Now link branchInst to veneer.
  if (IsBranch(branchInst->InstructionBits())) {
    branchInst->SetInstructionBits(SetBranchOffset(
        branch.getOffset(), veneer.getOffset(), branchInst->InstructionBits()));
  } else {
    MOZ_ASSERT(IsJal(branchInst->InstructionBits()));
    branchInst->SetInstructionBits(SetJalOffset(
        branch.getOffset(), veneer.getOffset(), branchInst->InstructionBits()));
  }
  DEBUG_PRINTF("\tfix to veneer:");
  disassembleInstr(branchInst->InstructionBits());
}
   1665 }  // namespace jit
   1666 }  // namespace js