tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Assembler-riscv64.h (25567B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      8 // All Rights Reserved.
      9 //
     10 // Redistribution and use in source and binary forms, with or without
     11 // modification, are permitted provided that the following conditions are
     12 // met:
     13 //
     14 // - Redistributions of source code must retain the above copyright notice,
     15 // this list of conditions and the following disclaimer.
     16 //
     17 // - Redistribution in binary form must reproduce the above copyright
     18 // notice, this list of conditions and the following disclaimer in the
     19 // documentation and/or other materials provided with the distribution.
     20 //
     21 // - Neither the name of Sun Microsystems or the names of contributors may
     22 // be used to endorse or promote products derived from this software without
     23 // specific prior written permission.
     24 //
     25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
     26 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
     27 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
     29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
     30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
     32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
     33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
     34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
     35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     36 
     37 // The original source code covered by the above license above has been
     38 // modified significantly by Google Inc.
     39 // Copyright 2021 the V8 project authors. All rights reserved.
     40 
     41 #ifndef jit_riscv64_Assembler_riscv64_h
     42 #define jit_riscv64_Assembler_riscv64_h
     43 
     44 #include "mozilla/Assertions.h"
     45 #include "mozilla/Sprintf.h"
     46 
     47 #include <stdint.h>
     48 
     49 #include "jit/CompactBuffer.h"
     50 #include "jit/JitCode.h"
     51 #include "jit/JitSpewer.h"
     52 #include "jit/Registers.h"
     53 #include "jit/RegisterSets.h"
     54 #include "jit/riscv64/Architecture-riscv64.h"
     55 #include "jit/riscv64/constant/Constant-riscv64.h"
     56 #include "jit/riscv64/extension/base-assembler-riscv.h"
     57 #include "jit/riscv64/extension/base-riscv-i.h"
     58 #include "jit/riscv64/extension/extension-riscv-a.h"
     59 #include "jit/riscv64/extension/extension-riscv-b.h"
     60 #include "jit/riscv64/extension/extension-riscv-c.h"
     61 #include "jit/riscv64/extension/extension-riscv-d.h"
     62 #include "jit/riscv64/extension/extension-riscv-f.h"
     63 #include "jit/riscv64/extension/extension-riscv-m.h"
     64 #include "jit/riscv64/extension/extension-riscv-v.h"
     65 #include "jit/riscv64/extension/extension-riscv-zicsr.h"
     66 #include "jit/riscv64/extension/extension-riscv-zifencei.h"
     67 #include "jit/riscv64/Register-riscv64.h"
     68 #include "jit/shared/Assembler-shared.h"
     69 #include "jit/shared/Disassembler-shared.h"
     70 #include "jit/shared/IonAssemblerBufferWithConstantPools.h"
     71 #include "js/HashTable.h"
     72 #include "wasm/WasmTypeDecls.h"
     73 namespace js {
     74 namespace jit {
     75 
// CPU feature flags for the optional RISC-V bit-manipulation extensions
// (Zba, Zbb).  Init() — defined out of line — is expected to probe the host
// and populate the flags below.  NOTE(review): the probing mechanism is not
// visible in this header; confirm in the corresponding .cpp.
class RVFlags final {
 public:
  static void Init();

  // Whether Init() has run and the extension flags are valid.
  static bool FlagsHaveBeenComputed() { return sComputed; }

  // Zba: address-generation instructions (sh1add/sh2add/sh3add, ...).
  static bool HasZbaExtension() { return sZbaExtension; }

  // Zbb: basic bit-manipulation instructions (clz, cpop, min/max, ...).
  static bool HasZbbExtension() { return sZbbExtension; }

 private:
  // All flags default to false until Init() runs.
  static inline bool sZbaExtension = false;
  static inline bool sZbbExtension = false;
  static inline bool sComputed = false;
};
     91 
// RAII scope claiming the primary float32 scratch register for its lifetime.
struct ScratchFloat32Scope : public AutoFloatRegisterScope {
  explicit ScratchFloat32Scope(MacroAssembler& masm)
      : AutoFloatRegisterScope(masm, ScratchFloat32Reg) {}
};
     96 
// RAII scope claiming the primary double scratch register for its lifetime.
struct ScratchDoubleScope : public AutoFloatRegisterScope {
  explicit ScratchDoubleScope(MacroAssembler& masm)
      : AutoFloatRegisterScope(masm, ScratchDoubleReg) {}
};
    101 
// RAII scope claiming the secondary float32 scratch register, for code that
// needs two float32 scratches simultaneously.
struct ScratchFloat32Scope2 : public AutoFloatRegisterScope {
  explicit ScratchFloat32Scope2(MacroAssembler& masm)
      : AutoFloatRegisterScope(masm, ScratchFloat32Reg2) {}
};
    106 
// RAII scope claiming the secondary double scratch register, for code that
// needs two double scratches simultaneously.
struct ScratchDoubleScope2 : public AutoFloatRegisterScope {
  explicit ScratchDoubleScope2(MacroAssembler& masm)
      : AutoFloatRegisterScope(masm, ScratchDoubleReg2) {}
};
    111 
class MacroAssembler;

// Platform alignment requirements for the native ABI, generated code, and
// the JIT stack.
static constexpr uint32_t ABIStackAlignment = 16;
static constexpr uint32_t CodeAlignment = 16;
static constexpr uint32_t JitStackAlignment = 16;
static constexpr uint32_t JitStackValueAlignment =
    JitStackAlignment / sizeof(Value);
static const uint32_t WasmStackAlignment = 16;
// A wasm trap occupies two 4-byte instructions.
static const uint32_t WasmTrapInstructionLength = 2 * sizeof(uint32_t);
// See comments in wasm::GenerateFunctionPrologue.  The difference between these
// is the size of the largest callable prologue on the platform.
static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
static constexpr uint32_t WasmCheckedTailEntryOffset = 20u;

// Pointers are 8 bytes wide on riscv64.
static const Scale ScalePointer = TimesEight;

class Assembler;

// Size in bytes of each slice of the pool-aware assembler buffer.
static constexpr int32_t SliceSize = 1024;

// Instruction buffer with constant-pool support, parameterized over this
// platform's Assembler and its short-branch range classification.
typedef js::jit::AssemblerBufferWithConstantPools<
    SliceSize, 4, Instruction, Assembler, NumShortBranchRangeTypes>
    Buffer;
    135 
// The riscv64 assembler.
//
// Aggregates the per-extension instruction emitters (base classes for the
// I/A/B/F/D/M/C/Zicsr/Zifencei extensions) over a constant-pool-aware
// instruction buffer (m_buffer).  Also carries relocation bookkeeping,
// pending-jump records, the platform Condition/DoubleCondition enums, and
// static entry points for patching already-emitted code.
class Assembler : public AssemblerShared,
                  public AssemblerRISCVI,
                  public AssemblerRISCVA,
                  public AssemblerRISCVB,
                  public AssemblerRISCVF,
                  public AssemblerRISCVD,
                  public AssemblerRISCVM,
                  public AssemblerRISCVC,
                  public AssemblerRISCVZicsr,
                  public AssemblerRISCVZifencei {
  // Registers handed out by UseScratchRegisterScope (t4/t5/t6; see the
  // constructor's initializer list).
  GeneralRegisterSet scratch_register_list_;

  static constexpr int kInvalidSlotPos = -1;

#ifdef JS_JITSPEW
  // Optional sink for disassembly spew; see setPrinter()/spew().
  Sprinter* printer;
#endif
  // Cleared when label_cache_ fails to allocate; see NoEnoughLabelCache().
  bool enoughLabelCache_ = true;

 protected:
  using LabelOffset = int32_t;
  using LabelCache =
      HashMap<LabelOffset, BufferOffset, js::DefaultHasher<LabelOffset>,
              js::SystemAllocPolicy>;
  // Maps label offsets to buffer offsets.  NOTE(review): populated/consumed
  // by out-of-line code; exact caching policy not visible in this header.
  LabelCache label_cache_;
  void NoEnoughLabelCache() { enoughLabelCache_ = false; }
  // Relocation streams copied out via copyJumpRelocationTable() and
  // copyDataRelocationTable().
  CompactBufferWriter jumpRelocations_;
  CompactBufferWriter dataRelocations_;
  // The instruction stream, with constant-pool support.
  Buffer m_buffer;
  bool isFinished = false;
  // Returns a pointer to the in-buffer instruction at |bo|.
  Instruction* editSrc(BufferOffset bo) { return m_buffer.getInst(bo); }

  struct RelativePatch {
    // the offset within the code buffer where the value is loaded that
    // we want to fix-up
    BufferOffset offset;
    void* target;
    RelocationKind kind;

    RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
        : offset(offset), target(target), kind(kind) {}
  };

  // Jumps whose absolute targets are resolved when the code is copied out.
  js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;

  // Record a pending jump to |target|.  JITCODE jumps additionally get an
  // entry in the jump relocation table so the target JitCode can be traced.
  void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
    enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
    if (kind == RelocationKind::JITCODE) {
      jumpRelocations_.writeUnsigned(src.getOffset());
    }
  }

  // Record an intra-code long jump as a CodeLabel, patched at finalization
  // by processCodeLabels()/Bind().
  void addLongJump(BufferOffset src, BufferOffset dst) {
    CodeLabel cl;
    cl.patchAt()->bind(src.getOffset());
    cl.target()->bind(dst.getOffset());
    cl.setLinkMode(CodeLabel::JumpImmediate);
    addCodeLabel(std::move(cl));
  }

 public:
  static bool FLAG_riscv_debug;

  Assembler()
      : scratch_register_list_((1 << t5.code()) | (1 << t4.code()) |
                               (1 << t6.code())),
#ifdef JS_JITSPEW
        printer(nullptr),
#endif
        m_buffer(/*guardSize*/ 2, /*headerSize*/ 2, /*instBufferAlign*/ 8,
                 /*poolMaxOffset*/ GetPoolMaxOffset(), /*pcBias*/ 8,
                 /*alignFillInst*/ kNopByte, /*nopFillInst*/ kNopByte),
        isFinished(false) {
  }
  static uint32_t NopFill;
  static uint32_t AsmPoolMaxOffset;
  static uint32_t GetPoolMaxOffset();
  bool reserve(size_t size);
  bool oom() const;
  void setPrinter(Sprinter* sp) {
#ifdef JS_JITSPEW
    printer = sp;
#endif
  }
  // Mark assembly complete; must be called at most once.
  void finish() {
    MOZ_ASSERT(!isFinished);
    isFinished = true;
  }
  // Forbid constant-pool emission while the next |maxInst| instructions are
  // emitted (see BlockTrampolinePoolScope for the RAII wrapper).
  void enterNoPool(size_t maxInst, size_t maxNewDeadlines = 0) {
    m_buffer.enterNoPool(maxInst, maxNewDeadlines);
  }
  void leaveNoPool() { m_buffer.leaveNoPool(); }
  bool swapBuffer(wasm::Bytes& bytes);
  // Size of the instruction stream, in bytes.
  size_t size() const;
  // Size of the data table, in bytes.
  size_t bytesNeeded() const;
  // Size of the jump relocation table, in bytes.
  size_t jumpRelocationTableBytes() const;
  size_t dataRelocationTableBytes() const;
  void copyJumpRelocationTable(uint8_t* dest);
  void copyDataRelocationTable(uint8_t* dest);
  // Copy the assembly code to the given buffer, and perform any pending
  // relocations relying on the target address.
  void executableCopy(uint8_t* buffer);
  // API for speaking with the IonAssemblerBufferWithConstantPools generate an
  // initial placeholder instruction that we want to later fix up.
  static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
  static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
  // We're not tracking short-range branches for ARM for now.
  static void PatchShortRangeBranchToVeneer(Buffer*, unsigned rangeIdx,
                                            BufferOffset deadline,
                                            BufferOffset veneer);
  // 32-bit marker word written in front of each constant pool.
  struct PoolHeader {
    uint32_t data;

    struct Header {
      // The size should take into account the pool header.
      // The size is in units of Instruction (4bytes), not byte.
      union {
        struct {
          uint32_t size : 15;

          // "Natural" guards are part of the normal instruction stream,
          // while "non-natural" guards are inserted for the sole purpose
          // of skipping around a pool.
          uint32_t isNatural : 1;
          // Always 0xffff; distinguishes a pool header from a real
          // instruction when scanning the stream.
          uint32_t ONES : 16;
        };
        uint32_t data;
      };

      Header(int size_, bool isNatural_)
          : size(size_), isNatural(isNatural_), ONES(0xffff) {}

      explicit Header(uint32_t data) : data(data) {
        static_assert(sizeof(Header) == sizeof(uint32_t));
        MOZ_ASSERT(ONES == 0xffff);
      }

      uint32_t raw() const {
        static_assert(sizeof(Header) == sizeof(uint32_t));
        return data;
      }
    };

    PoolHeader(int size_, bool isNatural_)
        : data(Header(size_, isNatural_).raw()) {}

    uint32_t size() const {
      Header tmp(data);
      return tmp.size;
    }

    uint32_t isNatural() const {
      Header tmp(data);
      return tmp.isNatural;
    }
  };

  static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
  static void WritePoolGuard(BufferOffset branch, Instruction* inst,
                             BufferOffset dest);
  void processCodeLabels(uint8_t* rawCode);
  BufferOffset nextOffset() { return m_buffer.nextOffset(); }
  // Get the buffer offset of the next inserted instruction. This may flush
  // constant pools.
  BufferOffset nextInstrOffset(int numInstr = 1) {
    return m_buffer.nextInstrOffset(numInstr);
  }
  void comment(const char* msg) { spew("; %s", msg); }

#ifdef JS_JITSPEW
  // Printf-style spew; forwards to the va_list overload below when either a
  // printer is attached or codegen spew is enabled.
  inline void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {
    if (MOZ_UNLIKELY(printer || JitSpewEnabled(JitSpew_Codegen))) {
      va_list va;
      va_start(va, fmt);
      spew(fmt, va);
      va_end(va);
    }
  }

#else
  MOZ_ALWAYS_INLINE void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {}
#endif

#ifdef JS_JITSPEW
  MOZ_COLD void spew(const char* fmt, va_list va) MOZ_FORMAT_PRINTF(2, 0) {
    // Buffer to hold the formatted string. Note that this may contain
    // '%' characters, so do not pass it directly to printf functions.
    char buf[200];

    int i = VsprintfLiteral(buf, fmt, va);
    if (i > -1) {
      if (printer) {
        printer->printf("%s\n", buf);
      }
      js::jit::JitSpew(js::jit::JitSpew_Codegen, "%s", buf);
    }
  }
#endif

  // Integer condition codes, aliased onto the generic riscv condition values.
  // CarrySet..NonZero continue the enumeration after Always.
  enum Condition {
    Overflow = overflow,
    Below = Uless,
    BelowOrEqual = Uless_equal,
    Above = Ugreater,
    AboveOrEqual = Ugreater_equal,
    Equal = equal,
    NotEqual = not_equal,
    GreaterThan = greater,
    GreaterThanOrEqual = greater_equal,
    LessThan = less,
    LessThanOrEqual = less_equal,
    Always = cc_always,
    CarrySet,
    CarryClear,
    Signed,
    NotSigned,
    Zero,
    NonZero,
  };

  enum DoubleCondition {
    // These conditions will only evaluate to true if the comparison is ordered
    // - i.e. neither operand is NaN.
    DoubleOrdered,
    DoubleEqual,
    DoubleNotEqual,
    DoubleGreaterThan,
    DoubleGreaterThanOrEqual,
    DoubleLessThan,
    DoubleLessThanOrEqual,
    // If either operand is NaN, these conditions always evaluate to true.
    DoubleUnordered,
    DoubleEqualOrUnordered,
    DoubleNotEqualOrUnordered,
    DoubleGreaterThanOrUnordered,
    DoubleGreaterThanOrEqualOrUnordered,
    DoubleLessThanOrUnordered,
    DoubleLessThanOrEqualOrUnordered,
  };

  Register getStackPointer() const { return StackPointer; }
  void flushBuffer() {}
  static int disassembleInstr(Instr instr, bool enable_spew = false);
  // Jump-chain helpers: unbound labels are threaded through the instruction
  // stream as a linked list of jumps; these walk and patch that chain.
  int jumpChainTargetAt(BufferOffset pos, bool is_internal);
  static int jumpChainTargetAt(Instruction* instruction, BufferOffset pos,
                               bool is_internal,
                               Instruction* instruction2 = nullptr);
  BufferOffset jumpChainGetNextLink(BufferOffset pos, bool is_internal);
  uint32_t jumpChainUseNextLink(Label* label, bool is_internal);
  static uint64_t jumpChainTargetAddressAt(Instruction* pos);
  static void jumpChainSetTargetValueAt(Instruction* pc, uint64_t target);
  // Returns true if the target was successfully assembled and spewed.
  bool jumpChainPutTargetAt(BufferOffset pos, BufferOffset target_pos,
                            bool trampoline = false);
  int32_t branchOffsetHelper(Label* L, OffsetSize bits);
  int32_t branchLongOffsetHelper(Label* L);

  // Determines if Label is bound and near enough so that branch instruction
  // can be used to reach it, instead of jump instruction.
  bool is_near(Label* L);
  bool is_near(Label* L, OffsetSize bits);
  bool is_near_branch(Label* L);

  // Pad with nops until the current offset is aligned to |m| (a power of
  // two, at least 4).
  void nopAlign(int m) {
    MOZ_ASSERT(m >= 4 && (m & (m - 1)) == 0);
    while ((currentOffset() & (m - 1)) != 0) {
      nop();
    }
  }
  // Emit one 32-bit instruction; in debug/spew builds, also disassemble it.
  virtual BufferOffset emit(Instr x) {
    MOZ_ASSERT(hasCreator());
    BufferOffset offset = m_buffer.putInt(x);
#if defined(DEBUG) || defined(JS_JITSPEW)
    if (!oom()) {
      DEBUG_PRINTF(
          "0x%" PRIx64 "(%" PRIxPTR "):",
          (uint64_t)editSrc(BufferOffset(currentOffset() - sizeof(Instr))),
          currentOffset() - sizeof(Instr));
      disassembleInstr(x, JitSpewEnabled(JitSpew_Codegen));
    }
#endif
    return offset;
  }
  // Compressed (16-bit) and 64-bit emission are not supported by this class.
  virtual BufferOffset emit(ShortInstr x) { MOZ_CRASH(); }
  virtual BufferOffset emit(uint64_t x) { MOZ_CRASH(); }
  // Emit a raw 32-bit data word (not disassembled as an instruction).
  virtual BufferOffset emit(uint32_t x) {
    DEBUG_PRINTF(
        "0x%" PRIx64 "(%" PRIxPTR "): uint32_t: %" PRId32 "\n",
        (uint64_t)editSrc(BufferOffset(currentOffset() - sizeof(Instr))),
        currentOffset() - sizeof(Instr), x);
    return m_buffer.putInt(x);
  }

  // Overwrite the already-emitted instruction at |offset| with |instr|,
  // spewing the before/after disassembly.
  void instr_at_put(BufferOffset offset, Instr instr) {
    DEBUG_PRINTF("\t[instr_at_put\n");
    DEBUG_PRINTF("\t%p %d \n\t", editSrc(offset), offset.getOffset());
    disassembleInstr(editSrc(offset)->InstructionBits());
    DEBUG_PRINTF("\t");
    *reinterpret_cast<Instr*>(editSrc(offset)) = instr;
    disassembleInstr(editSrc(offset)->InstructionBits());
    DEBUG_PRINTF("\t]\n");
  }

  static Condition InvertCondition(Condition);

  static DoubleCondition InvertCondition(DoubleCondition);

  // Read back / rewrite the 64-bit constant materialized by a
  // WriteLoad64Instructions sequence starting at |inst0|.
  static uint64_t ExtractLoad64Value(Instruction* inst0);
  static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
  static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                      ImmPtr expectedValue);
  static void PatchDataWithValueCheck(CodeLocationLabel label,
                                      PatchedImmPtr newValue,
                                      PatchedImmPtr expectedValue);
  static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);

  static void PatchWrite_NearCall(CodeLocationLabel start,
                                  CodeLocationLabel toCall) {
    Instruction* inst = (Instruction*)start.raw();
    uint8_t* dest = toCall.raw();

    // Overwrite whatever instruction used to be here with a call.
    // Always use long jump for two reasons:
    // - Jump has to be the same size because of PatchWrite_NearCallSize.
    // - Return address has to be at the end of replaced block.
    // Short jump wouldn't be more efficient.
    // WriteLoad64Instructions will emit 6 instrs to load a addr.
    Assembler::WriteLoad64Instructions(inst, SavedScratchRegister,
                                       (uint64_t)dest);
    // jalr ra, 0(SavedScratchRegister): call through the loaded address,
    // leaving the return address right after the 7-instruction block.
    Instr jalr_ = JALR | (ra.code() << kRdShift) | (0x0 << kFunct3Shift) |
                  (SavedScratchRegister.code() << kRs1Shift) |
                  (0x0 << kImm12Shift);
    // NOTE(review): this pointer arithmetic advances by
    // 6 * kInstrSize * sizeof(Instruction) bytes; it is correct only if
    // sizeof(Instruction) == 1 — confirm against Constant-riscv64.h.
    *reinterpret_cast<Instr*>(inst + 6 * kInstrSize) = jalr_;
  }
  static void WriteLoad64Instructions(Instruction* inst0, Register reg,
                                      uint64_t value);

  // 6 load instructions plus the trailing jalr (see PatchWrite_NearCall).
  static uint32_t PatchWrite_NearCallSize() { return 7 * sizeof(uint32_t); }

  static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                   CompactBufferReader& reader);
  static void TraceDataRelocations(JSTracer* trc, JitCode* code,
                                   CompactBufferReader& reader);

  static void ToggleToJmp(CodeLocationLabel inst_);
  static void ToggleToCmp(CodeLocationLabel inst_);
  static void ToggleCall(CodeLocationLabel inst_, bool enable);

  static void Bind(uint8_t* rawCode, const CodeLabel& label);
  // label operations
  void bind(Label* label, BufferOffset boff = BufferOffset());
  void bind(CodeLabel* label) { label->target()->bind(currentOffset()); }
  uint32_t currentOffset() { return nextOffset().getOffset(); }
  void retarget(Label* label, Label* target);
  static uint32_t NopSize() { return 4; }

  // Extract the 64-bit pointer materialized by the load sequence at
  // |instPtr|.
  static uint64_t GetPointer(uint8_t* instPtr) {
    Instruction* inst = (Instruction*)instPtr;
    return Assembler::ExtractLoad64Value(inst);
  }

  // All four rounding modes have native instructions on this platform.
  static bool HasRoundInstruction(RoundingMode mode) {
    switch (mode) {
      case RoundingMode::Up:
      case RoundingMode::Down:
      case RoundingMode::NearestTiesToEven:
      case RoundingMode::TowardsZero:
        return true;
    }
    MOZ_CRASH("unexpected mode");
  }

  static bool HasZbaExtension() { return RVFlags::HasZbaExtension(); }

  static bool HasZbbExtension() { return RVFlags::HasZbbExtension(); }

  // Not implemented on this platform.
  void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                   const Disassembler::HeapAccess& heapAccess) {
    MOZ_CRASH();
  }

  void setUnlimitedBuffer() { m_buffer.setUnlimited(); }

  GeneralRegisterSet* GetScratchRegisterList() {
    return &scratch_register_list_;
  }

  // As opposed to x86/x64 version, the data relocation has to be executed
  // before to recover the pointer, and not after.
  void writeDataRelocation(ImmGCPtr ptr) {
    // Raw GC pointer relocations and Value relocations both end up in
    // TraceOneDataRelocation.
    if (ptr.value) {
      if (gc::IsInsideNursery(ptr.value)) {
        embedsNurseryPointers_ = true;
      }
      dataRelocations_.writeUnsigned(nextOffset().getOffset());
    }
  }

  bool appendRawCode(const uint8_t* code, size_t numBytes);

  // Debug-only check that this code embeds no GC pointers (no data
  // relocations; only HARDCODED jump targets).
  void assertNoGCThings() const {
#ifdef DEBUG
    MOZ_ASSERT(dataRelocations_.length() == 0);
    for (auto& j : jumps_) {
      MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
    }
#endif
  }

  // Assembler Pseudo Instructions (Tables 25.2, 25.3, RISC-V Unprivileged ISA)
  void break_(uint32_t code, bool break_as_stop = false);
  void nop();
  // Load-immediate: emits the shortest sequence for |imm|.
  void RV_li(Register rd, int64_t imm);
  static int RV_li_count(int64_t imm, bool is_get_temp_reg = false);
  void GeneralLi(Register rd, int64_t imm);
  static int GeneralLiCount(int64_t imm, bool is_get_temp_reg = false);
  void RecursiveLiImpl(Register rd, int64_t imm);
  void RecursiveLi(Register rd, int64_t imm);
  static int RecursiveLiCount(int64_t imm);
  static int RecursiveLiImplCount(int64_t imm);
  // Returns the number of instructions required to load the immediate
  static int li_estimate(int64_t imm, bool is_get_temp_reg = false);
  // Loads an immediate, always using 8 instructions, regardless of the value,
  // so that it can be modified later.
  void li_constant(Register rd, int64_t imm);
  void li_ptr(Register rd, int64_t imm);
};
    568 
// Assigns successive native-call arguments to registers or stack slots.
// NOTE(review): next() is defined out of line; presumably it follows the
// RISC-V integer/float argument-register convention implied by the two
// index counters below — confirm in the .cpp.
class ABIArgGenerator : public ABIArgGeneratorShared {
 public:
  explicit ABIArgGenerator(ABIKind kind)
      : ABIArgGeneratorShared(kind),
        intRegIndex_(0),
        floatRegIndex_(0),
        current_() {}

  // Compute and return the location of the next argument of the given type.
  ABIArg next(MIRType);
  // Location produced by the most recent next() call.
  ABIArg& current() { return current_; }

 protected:
  unsigned intRegIndex_;    // integer argument registers consumed so far
  unsigned floatRegIndex_;  // float argument registers consumed so far
  ABIArg current_;          // result of the last next() call
};
    585 
    586 // Note that nested uses of these are allowed, but the inner calls must imply
    587 // an area of code which exists only inside the area of code implied by the
    588 // outermost call.  Otherwise AssemblerBufferWithConstantPools::enterNoPool
    589 // will assert.
    590 class BlockTrampolinePoolScope {
    591 public:
    592  explicit BlockTrampolinePoolScope(Assembler* assem, size_t margin,
    593                                    size_t maxBranches = 0)
    594      : assem_(assem) {
    595    assem_->enterNoPool(margin, maxBranches);
    596  }
    597  ~BlockTrampolinePoolScope() { assem_->leaveNoPool(); }
    598 
    599 private:
    600  Assembler* assem_;
    601  BlockTrampolinePoolScope() = delete;
    602  BlockTrampolinePoolScope(const BlockTrampolinePoolScope&) = delete;
    603  BlockTrampolinePoolScope& operator=(const BlockTrampolinePoolScope&) = delete;
    604 };
    605 
// RAII helper that borrows scratch registers from an Assembler's scratch
// register list.  Constructors/destructor and Acquire/Release/hasAvailable
// are defined out of line; presumably the destructor restores the set saved
// in old_available_ — confirm in the .cpp.
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(Assembler& assembler);
  explicit UseScratchRegisterScope(Assembler* assembler);
  ~UseScratchRegisterScope();

  // Take one register out of the available set for use as a scratch.
  Register Acquire();
  // Return a previously acquired register to the available set.
  void Release(const Register& reg);
  bool hasAvailable() const;
  // Add |list| to the registers this scope may hand out.
  void Include(const GeneralRegisterSet& list) {
    *available_ = GeneralRegisterSet::Union(*available_, list);
  }
  // Remove |list| from the registers this scope may hand out.
  void Exclude(const GeneralRegisterSet& list) {
    *available_ = GeneralRegisterSet::Subtract(*available_, list);
  }

 private:
  GeneralRegisterSet* available_;     // the assembler's live scratch set
  GeneralRegisterSet old_available_;  // snapshot taken at construction
};
    626 
    627 // Class Operand represents a shifter operand in data processing instructions.
    628 class Operand {
    629  enum Tag { REG, FREG, MEM, IMM };
    630 
    631 public:
    632  MOZ_IMPLICIT Operand(Register rm) : tag(REG), rm_(rm.code()) {}
    633 
    634  explicit Operand(FloatRegister freg) : tag(FREG), rm_(freg.encoding()) {}
    635 
    636  explicit Operand(Register base, Imm32 off)
    637      : tag(MEM), rm_(base.code()), offset_(off.value) {}
    638 
    639  explicit Operand(Register base, int32_t off)
    640      : tag(MEM), rm_(base.code()), offset_(off) {}
    641 
    642  explicit Operand(const Address& addr)
    643      : tag(MEM), rm_(addr.base.code()), offset_(addr.offset) {}
    644 
    645  explicit Operand(int64_t immediate) : tag(IMM), value_(immediate) {}
    646 
    647  bool is_reg() const { return tag == REG; }
    648  bool is_freg() const { return tag == FREG; }
    649  bool is_mem() const { return tag == MEM; }
    650  bool is_imm() const { return tag == IMM; }
    651 
    652  int64_t immediate() const {
    653    MOZ_ASSERT(is_imm());
    654    return value_;
    655  }
    656 
    657  Register rm() const {
    658    MOZ_ASSERT(is_reg() || is_mem());
    659    return Register::FromCode(rm_);
    660  }
    661 
    662  int32_t offset() const {
    663    MOZ_ASSERT(is_mem());
    664    return offset_;
    665  }
    666 
    667  FloatRegister toFReg() const {
    668    MOZ_ASSERT(is_freg());
    669    return FloatRegister::FromCode(rm_);
    670  }
    671 
    672  Register toReg() const {
    673    MOZ_ASSERT(is_reg());
    674    return Register::FromCode(rm_);
    675  }
    676 
    677  Address toAddress() const {
    678    MOZ_ASSERT(is_mem());
    679    return Address(Register::FromCode(rm_), offset());
    680  }
    681 
    682 private:
    683  Tag tag;
    684  union {
    685    struct {
    686      uint32_t rm_;
    687      int32_t offset_;
    688    };
    689    int64_t value_;  // valid if tag == IMM
    690  };
    691 };
    692 
    693 static const uint32_t NumIntArgRegs = 8;
    694 static const uint32_t NumFloatArgRegs = 8;
    695 static inline bool GetIntArgReg(uint32_t usedIntArgs, Register* out) {
    696  if (usedIntArgs < NumIntArgRegs) {
    697    *out = Register::FromCode(a0.code() + usedIntArgs);
    698    return true;
    699  }
    700  return false;
    701 }
    702 
    703 static inline bool GetFloatArgReg(uint32_t usedFloatArgs, FloatRegister* out) {
    704  if (usedFloatArgs < NumFloatArgRegs) {
    705    *out = FloatRegister::FromCode(fa0.encoding() + usedFloatArgs);
    706    return true;
    707  }
    708  return false;
    709 }
    710 
    711 // Get a register in which we plan to put a quantity that will be used as an
    712 // integer argument. This differs from GetIntArgReg in that if we have no more
    713 // actual argument registers to use we will fall back on using whatever
    714 // CallTempReg* don't overlap the argument registers, and only fail once those
    715 // run out too.
    716 static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
    717                                       uint32_t usedFloatArgs, Register* out) {
    718  // NOTE: We can't properly determine which regs are used if there are
    719  // float arguments. If this is needed, we will have to guess.
    720  MOZ_ASSERT(usedFloatArgs == 0);
    721 
    722  if (GetIntArgReg(usedIntArgs, out)) {
    723    return true;
    724  }
    725  // Unfortunately, we have to assume things about the point at which
    726  // GetIntArgReg returns false, because we need to know how many registers it
    727  // can allocate.
    728  usedIntArgs -= NumIntArgRegs;
    729  if (usedIntArgs >= NumCallTempNonArgRegs) {
    730    return false;
    731  }
    732  *out = CallTempNonArgRegs[usedIntArgs];
    733  return true;
    734 }
    735 
    736 }  // namespace jit
    737 }  // namespace js
    738 #endif /* jit_riscv64_Assembler_riscv64_h */