tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MacroAssembler.h (278127B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef jit_MacroAssembler_h
      8 #define jit_MacroAssembler_h
      9 
     10 #include "mozilla/MacroForEach.h"
     11 #include "mozilla/Maybe.h"
     12 #include "mozilla/Variant.h"
     13 
     14 #if defined(JS_CODEGEN_X86)
     15 #  include "jit/x86/MacroAssembler-x86.h"
     16 #elif defined(JS_CODEGEN_X64)
     17 #  include "jit/x64/MacroAssembler-x64.h"
     18 #elif defined(JS_CODEGEN_ARM)
     19 #  include "jit/arm/MacroAssembler-arm.h"
     20 #elif defined(JS_CODEGEN_ARM64)
     21 #  include "jit/arm64/MacroAssembler-arm64.h"
     22 #elif defined(JS_CODEGEN_MIPS64)
     23 #  include "jit/mips64/MacroAssembler-mips64.h"
     24 #elif defined(JS_CODEGEN_LOONG64)
     25 #  include "jit/loong64/MacroAssembler-loong64.h"
     26 #elif defined(JS_CODEGEN_RISCV64)
     27 #  include "jit/riscv64/MacroAssembler-riscv64.h"
     28 #elif defined(JS_CODEGEN_WASM32)
     29 #  include "jit/wasm32/MacroAssembler-wasm32.h"
     30 #elif defined(JS_CODEGEN_NONE)
     31 #  include "jit/none/MacroAssembler-none.h"
     32 #else
     33 #  error "Unknown architecture!"
     34 #endif
     35 #include "jit/ABIArgGenerator.h"
     36 #include "jit/ABIFunctions.h"
     37 #include "jit/AtomicOp.h"
     38 #include "jit/IonTypes.h"
     39 #include "jit/MoveResolver.h"
     40 #include "jit/VMFunctions.h"
     41 #include "js/ScalarType.h"  // js::Scalar::Type
     42 #include "util/Memory.h"
     43 #include "vm/FunctionFlags.h"
     44 #include "vm/Opcodes.h"
     45 #include "vm/RealmFuses.h"
     46 #include "vm/RuntimeFuses.h"
     47 #include "wasm/WasmAnyRef.h"
     48 
     49 // [SMDOC] MacroAssembler multi-platform overview
     50 //
     51 // * How to read/write MacroAssembler method declarations:
     52 //
     53 // The following macros are made to avoid #ifdef around each method declaration
     54 // of the Macro Assembler, and they are also used as a hint on the location of
     55 // the implementations of each method.  For example, the following declaration
     56 //
     57 //   void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
     58 //
     59 // suggests the MacroAssembler::Pop(FloatRegister) method is implemented in
     60 // x86-shared/MacroAssembler-x86-shared.h, and also in arm/MacroAssembler-arm.h.
     61 //
     62 // - If there is no annotation, then there is only one generic definition in
     63 //   MacroAssembler.cpp.
     64 //
     65 // - If the declaration is "inline", then the method definition(s) would be in
     66 //   the "-inl.h" variant of the same file(s).
     67 //
     68 // The script check_macroassembler_style.py (which runs on every build) is
     69 // used to verify that method definitions match the annotation on the method
     70 // declarations.  If there is any difference, then you either forgot to define
     71 // the method in one of the macro assembler, or you forgot to update the
     72 // annotation of the macro assembler declaration.
     73 //
     74 // Some convenient short-cuts are used to avoid repeating the same list of
     75 // architectures on each method declaration, such as PER_ARCH and
     76 // PER_SHARED_ARCH.
     77 //
     78 // Functions that are architecture-agnostic and are the same for all
     79 // architectures, that it's necessary to define inline *in this header* to
     80 // avoid used-before-defined warnings/errors that would occur if the
     81 // definitions were in MacroAssembler-inl.h, should use the OOL_IN_HEADER
     82 // marker at end of the declaration:
     83 //
     84 //   inline uint32_t framePushed() const OOL_IN_HEADER;
     85 //
     86 // Such functions should then be defined immediately after MacroAssembler's
     87 // definition, for example:
     88 //
     89 //   //{{{ check_macroassembler_style
     90 //   inline uint32_t
     91 //   MacroAssembler::framePushed() const
     92 //   {
     93 //       return framePushed_;
     94 //   }
     95 //   ////}}} check_macroassembler_style
     96 
     97 #define ALL_ARCH mips64, arm, arm64, x86, x64, loong64, riscv64, wasm32
     98 #define ALL_SHARED_ARCH arm, arm64, loong64, mips64, riscv64, x86_shared, wasm32
     99 
    100 // * How this macro works:
    101 //
    102 // DEFINED_ON is a macro which checks whether, for the current architecture,
    103 // the method is defined on the macro assembler or not.
    104 //
    105 // For each architecture, we have a macro named DEFINED_ON_arch.  This macro is
    106 // empty if this is not the current architecture.  Otherwise it must be either
    107 // set to "define" or "crash" (only used for the none target so far).
    108 //
    109 // The DEFINED_ON macro maps the list of architecture names given as arguments
    110 // to a list of macro names.  For example,
    111 //
    112 //   DEFINED_ON(arm, x86_shared)
    113 //
    114 // is expanded to
    115 //
    116 //   DEFINED_ON_none DEFINED_ON_arm DEFINED_ON_x86_shared
    117 //
    118 // which are later expanded on ARM, x86, x64 by DEFINED_ON_EXPAND_ARCH_RESULTS
    119 // to
    120 //
    121 //   define
    122 //
    123 // or if the JIT is disabled or set to no architecture to
    124 //
    125 //   crash
    126 //
    127 // or to nothing, if the current architecture is not listed in the list of
    128 // arguments of DEFINED_ON.  Note, only one of the DEFINED_ON_arch macro
    129 // contributes to the non-empty result, which is the macro of the current
    130 // architecture if it is listed in the arguments of DEFINED_ON.
    131 //
    132 // This result is appended to DEFINED_ON_RESULT_ before expanding the macro,
    133 // which results in either no annotation, a MOZ_CRASH(), or a "= delete"
    134 // annotation on the method declaration.
    135 
    136 #define DEFINED_ON_x86
    137 #define DEFINED_ON_x64
    138 #define DEFINED_ON_x86_shared
    139 #define DEFINED_ON_arm
    140 #define DEFINED_ON_arm64
    141 #define DEFINED_ON_mips64
    142 #define DEFINED_ON_loong64
    143 #define DEFINED_ON_riscv64
    144 #define DEFINED_ON_wasm32
    145 #define DEFINED_ON_none
    146 
    147 // Specialize for each architecture.
    148 #if defined(JS_CODEGEN_X86)
    149 #  undef DEFINED_ON_x86
    150 #  define DEFINED_ON_x86 define
    151 #  undef DEFINED_ON_x86_shared
    152 #  define DEFINED_ON_x86_shared define
    153 #elif defined(JS_CODEGEN_X64)
    154 #  undef DEFINED_ON_x64
    155 #  define DEFINED_ON_x64 define
    156 #  undef DEFINED_ON_x86_shared
    157 #  define DEFINED_ON_x86_shared define
    158 #elif defined(JS_CODEGEN_ARM)
    159 #  undef DEFINED_ON_arm
    160 #  define DEFINED_ON_arm define
    161 #elif defined(JS_CODEGEN_ARM64)
    162 #  undef DEFINED_ON_arm64
    163 #  define DEFINED_ON_arm64 define
    164 #elif defined(JS_CODEGEN_MIPS64)
    165 #  undef DEFINED_ON_mips64
    166 #  define DEFINED_ON_mips64 define
    167 #elif defined(JS_CODEGEN_LOONG64)
    168 #  undef DEFINED_ON_loong64
    169 #  define DEFINED_ON_loong64 define
    170 #elif defined(JS_CODEGEN_RISCV64)
    171 #  undef DEFINED_ON_riscv64
    172 #  define DEFINED_ON_riscv64 define
    173 #elif defined(JS_CODEGEN_WASM32)
    174 #  undef DEFINED_ON_wasm32
    175 #  define DEFINED_ON_wasm32 define
    176 #elif defined(JS_CODEGEN_NONE)
    177 #  undef DEFINED_ON_none
    178 #  define DEFINED_ON_none crash
    179 #else
    180 #  error "Unknown architecture!"
    181 #endif
    182 
    183 #define DEFINED_ON_RESULT_crash \
    184  {                             \
    185    MOZ_CRASH();                \
    186  }
    187 #define DEFINED_ON_RESULT_define
    188 #define DEFINED_ON_RESULT_ = delete
    189 
    190 #define DEFINED_ON_DISPATCH_RESULT_2(Macro, Result) Macro##Result
    191 #define DEFINED_ON_DISPATCH_RESULT(...) \
    192  DEFINED_ON_DISPATCH_RESULT_2(DEFINED_ON_RESULT_, __VA_ARGS__)
    193 
    194 // We need to let the evaluation of MOZ_FOR_EACH terminate.
    195 #define DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult) \
    196  DEFINED_ON_DISPATCH_RESULT ParenResult
    197 #define DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult) \
    198  DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult)
    199 #define DEFINED_ON_EXPAND_ARCH_RESULTS(ParenResult) \
    200  DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult)
    201 
    202 #define DEFINED_ON_FWDARCH(Arch) DEFINED_ON_##Arch
    203 #define DEFINED_ON_MAP_ON_ARCHS(ArchList) \
    204  DEFINED_ON_EXPAND_ARCH_RESULTS(         \
    205      (MOZ_FOR_EACH(DEFINED_ON_FWDARCH, (), ArchList)))
    206 
    207 #define DEFINED_ON(...) DEFINED_ON_MAP_ON_ARCHS((none, __VA_ARGS__))
    208 
    209 #define PER_ARCH DEFINED_ON(ALL_ARCH)
    210 #define PER_SHARED_ARCH DEFINED_ON(ALL_SHARED_ARCH)
    211 #define OOL_IN_HEADER
    212 
    213 namespace JS {
    214 struct ExpandoAndGeneration;
    215 }
    216 
    217 namespace js {
    218 
    219 class StaticStrings;
    220 class FixedLengthTypedArrayObject;
    221 
    222 namespace wasm {
    223 class CalleeDesc;
    224 class CallSiteDesc;
    225 class BytecodeOffset;
    226 class MemoryAccessDesc;
    227 
    228 enum class FailureMode : uint8_t;
    229 enum class SimdOp;
    230 enum class SymbolicAddress;
    231 enum class Trap;
    232 }  // namespace wasm
    233 
    234 namespace jit {
    235 
    236 // Defined in JitFrames.h
    237 class FrameDescriptor;
    238 enum class ExitFrameType : uint8_t;
    239 
    240 class AutoSaveLiveRegisters;
    241 class CompileZone;
    242 class TemplateNativeObject;
    243 class TemplateObject;
    244 
    245 enum class CheckUnsafeCallWithABI {
    246  // Require the callee to use AutoUnsafeCallWithABI.
    247  Check,
    248 
    249  // We pushed an exit frame so this callWithABI can safely GC and walk the
    250  // stack.
    251  DontCheckHasExitFrame,
    252 
    253  // Don't check this callWithABI uses AutoUnsafeCallWithABI, for instance
    254  // because we're calling a simple helper function (like malloc or js_free)
    255  // that we can't change and/or that we know won't GC.
    256  DontCheckOther,
    257 };
    258 
    259 // This is a global function made to create the DynFn type in a controlled
    260 // environment which would check if the function signature has been registered
    261 // as an ABI function signature.
    262 template <typename Sig>
    263 static inline DynFn DynamicFunction(Sig fun);
    264 
    265 enum class CharEncoding { Latin1, TwoByte };
    266 
    267 constexpr uint32_t WasmCallerInstanceOffsetBeforeCall =
    268    wasm::FrameWithInstances::callerInstanceOffsetWithoutFrame();
    269 constexpr uint32_t WasmCalleeInstanceOffsetBeforeCall =
    270    wasm::FrameWithInstances::calleeInstanceOffsetWithoutFrame();
    271 
    272 // Allocation sites may be passed to GC thing allocation methods either via a
    273 // register (for baseline compilation) or an enum indicating one of the
    274 // catch-all allocation sites (for optimized compilation).
    275 struct AllocSiteInput
    276    : public mozilla::Variant<Register, gc::CatchAllAllocSite> {
    277  using Base = mozilla::Variant<Register, gc::CatchAllAllocSite>;
    278  AllocSiteInput() : Base(gc::CatchAllAllocSite::Unknown) {}
    279  explicit AllocSiteInput(gc::CatchAllAllocSite catchAll) : Base(catchAll) {}
    280  explicit AllocSiteInput(Register reg) : Base(reg) {}
    281 };
    282 
    283 // Instance slots (including ShadowStackArea) and arguments size information
    284 // from two neighboring frames.
    285 // Used in Wasm tail calls to remove frame.
    286 struct ReturnCallAdjustmentInfo {
    287  uint32_t newSlotsAndStackArgBytes;
    288  uint32_t oldSlotsAndStackArgBytes;
    289 
    290  ReturnCallAdjustmentInfo(uint32_t newSlotsAndStackArgBytes,
    291                           uint32_t oldSlotsAndStackArgBytes)
    292      : newSlotsAndStackArgBytes(newSlotsAndStackArgBytes),
    293        oldSlotsAndStackArgBytes(oldSlotsAndStackArgBytes) {}
    294 };
    295 
    296 struct BranchWasmRefIsSubtypeRegisters {
    297  bool needSuperSTV;
    298  bool needScratch1;
    299  bool needScratch2;
    300 };
    301 
    302 // [SMDOC] Code generation invariants (incomplete)
    303 //
    304 // ## 64-bit GPRs carrying 32-bit values
    305 //
    306 // At least at the end of every JS or Wasm operation (= SpiderMonkey bytecode or
    307 // Wasm bytecode; this is necessarily a little vague), if a 64-bit GPR has a
    308 // 32-bit value, then the upper 32 bits of the register may be predictable in
    309 // accordance with platform-specific rules, as follows.
    310 //
    311 // - On x64 and arm64, the upper bits are zero
    312 // - On mips64 and loongarch64 the upper bits are the sign extension of the
    313 //   lower bits
    314 // - (On risc-v we have no rule, having no port yet.  Sign extension is the most
    315 //   likely rule, but "unpredictable" is an option.)
    316 //
    317 // In most cases no extra work needs to be done to maintain the invariant:
    318 //
    319 // - 32-bit operations on x64 and arm64 zero-extend the result to 64 bits.
    320 //   These operations ignore the upper bits of the inputs.
    321 // - 32-bit operations on mips64 sign-extend the result to 64 bits (even many
    322 //   that are labeled as "unsigned", eg ADDU, though not all, eg LU).
    323 //   Additionally, the inputs to many 32-bit operations must be properly
    324 //   sign-extended to avoid "unpredictable" behavior, and our simulators check
    325 //   that inputs conform.
    326 // - (32-bit operations on risc-v and loongarch64 sign-extend, much as mips, but
    327 //   appear to ignore the upper bits of the inputs.)
    328 //
    329 // The upshot of these invariants is, among other things, that:
    330 //
    331 // - No code needs to be generated when a 32-bit value is extended to 64 bits
    332 //   or a 64-bit value is wrapped to 32 bits, if the upper bits are known to be
    333 //   correct because they resulted from an operation that produced them
    334 //   predictably.
    335 // - Literal loads must be careful to avoid instructions that might extend the
    336 //   literal in the wrong way.
    337 // - Code that produces values using intermediate values with non-canonical
    338 //   extensions must extend according to platform conventions before being
    339 //   "done".
    340 //
    341 // All optimizations are necessarily platform-specific and should only be used
    342 // in platform-specific code.  We may add architectures in the future that do
    343 // not follow the patterns of the few architectures we already have.
    344 //
    345 // Also see MacroAssembler::debugAssertCanonicalInt32().
    346 
    347 // The public entrypoint for emitting assembly. Note that a MacroAssembler can
    348 // use cx->lifoAlloc, so take care not to interleave masm use with other
    349 // lifoAlloc use if one will be destroyed before the other.
    350 class MacroAssembler : public MacroAssemblerSpecific {
    351 private:
    352  // Information about the current JSRuntime. This is nullptr only for Wasm
    353  // compilations.
    354  CompileRuntime* maybeRuntime_ = nullptr;
    355 
    356  // Information about the current Realm. This is nullptr for Wasm compilations
    357  // and when compiling runtime-wide jitcode that will live in the Atom zone:
    358  // for example, trampolines, the baseline interpreter, and (if
    359  // self_hosted_cache is enabled) self-hosted baseline code.
    360  CompileRealm* maybeRealm_ = nullptr;
    361 
    362  // Labels for handling exceptions and failures.
    363  NonAssertingLabel failureLabel_;
    364 
    365 protected:
    366  // Constructor is protected. Use one of the derived classes!
    367  explicit MacroAssembler(TempAllocator& alloc,
    368                          CompileRuntime* maybeRuntime = nullptr,
    369                          CompileRealm* maybeRealm = nullptr);
    370 
    371 public:
    372  MoveResolver& moveResolver() {
    373    // As an optimization, the MoveResolver is a persistent data structure
    374    // shared between visitors in the CodeGenerator. This assertion
    375    // checks that state is not leaking from visitor to visitor
    376    // via an unresolved addMove().
    377    MOZ_ASSERT(moveResolver_.hasNoPendingMoves());
    378    return moveResolver_;
    379  }
    380 
    381  size_t instructionsSize() const { return size(); }
    382 
    383  CompileRealm* realm() const {
    384    MOZ_ASSERT(maybeRealm());
    385    return maybeRealm();
    386  }
    387  CompileRealm* maybeRealm() const { return maybeRealm_; }
    388  CompileRuntime* runtime() const {
    389    MOZ_ASSERT(maybeRuntime_);
    390    return maybeRuntime_;
    391  }
    392 
    393 #ifdef JS_HAS_HIDDEN_SP
    394  void Push(RegisterOrSP reg);
    395 #endif
    396 
    397 #ifdef ENABLE_WASM_SIMD
    398  // `op` should be a shift operation. Return true if a variable-width shift
    399  // operation on this architecture should pre-mask the shift count, and if so,
    400  // return the mask in `*mask`.
    401  static bool MustMaskShiftCountSimd128(wasm::SimdOp op, int32_t* mask);
    402 #endif
    403 
    404  //{{{ check_macroassembler_decl_style
    405 public:
    406  // ===============================================================
    407  // MacroAssembler high-level usage.
    408 
    409  // Flushes the assembly buffer, on platforms that need it.
    410  void flush() PER_SHARED_ARCH;
    411 
    412  // Add a comment that is visible in the pretty printed assembly code.
    413  void comment(const char* msg) PER_SHARED_ARCH;
    414 
    415  // ===============================================================
    416  // Frame manipulation functions.
    417 
    418  inline uint32_t framePushed() const OOL_IN_HEADER;
    419  inline void setFramePushed(uint32_t framePushed) OOL_IN_HEADER;
    420  inline void adjustFrame(int32_t value) OOL_IN_HEADER;
    421 
    422  // Adjust the frame, to account for implicit modification of the stack
    423  // pointer, such that callee can remove arguments on the behalf of the
    424  // caller.
    425  inline void implicitPop(uint32_t bytes) OOL_IN_HEADER;
    426 
    427 private:
    428  // This field is used to statically (at compilation time) emulate a frame
    429  // pointer by keeping track of stack manipulations.
    430  //
    431  // It is maintained by all stack manipulation functions below.
    432  uint32_t framePushed_;
    433 
    434 public:
    435  // ===============================================================
    436  // Stack manipulation functions -- sets of registers.
    437 
    438  // Approximately speaking, the following routines must use the same memory
    439  // layout.  Any inconsistencies will certainly lead to crashing in generated
    440  // code:
    441  //
    442  //   MacroAssembler::PushRegsInMaskSizeInBytes
    443  //   MacroAssembler::PushRegsInMask
    444  //   MacroAssembler::storeRegsInMask
    445  //   MacroAssembler::PopRegsInMask
    446  //   MacroAssembler::PopRegsInMaskIgnore
    447  //   FloatRegister::getRegisterDumpOffsetInBytes
    448  //   (no class) PushRegisterDump
    449  //   (union) RegisterContent
    450  //   JitRuntime::generateInvalidator
    451  //   JitRuntime::generateBailoutHandler
    452  //   JSJitFrameIter::machineState
    453  //
    454  // To be more exact, the invariants are:
    455  //
    456  // * The save area is conceptually viewed as starting at a highest address
    457  //   (really, at "highest address - 1") and working down to some lower
    458  //   address.
    459  //
    460  // * PushRegsInMask, storeRegsInMask and PopRegsInMask{Ignore} must use
    461  //   exactly the same memory layout, when starting from the abovementioned
    462  //   highest address.
    463  //
    464  // * PushRegsInMaskSizeInBytes must produce a value which is exactly equal
    465  //   to the change in the machine's stack pointer register as a result of
    466  //   calling PushRegsInMask or PopRegsInMask{Ignore}.  This value must be at
    467  //   least uintptr_t-aligned on the target, and may be more aligned than that.
    468  //
    469  // * PushRegsInMaskSizeInBytes must produce a value which is greater than or
    470  //   equal to the amount of space used by storeRegsInMask.
    471  //
    472  // * Hence, regardless of whether the save area is created with
    473  //   storeRegsInMask or PushRegsInMask, it is guaranteed to fit inside an
    474  //   area of size calculated by PushRegsInMaskSizeInBytes.
    475  //
    476  // * For the `ignore` argument of PopRegsInMaskIgnore, equality checking
    477  //   for the floating point/SIMD registers is done on the basis of the
    478  //   underlying physical register, regardless of width.  For example, if the
    479  //   to-restore set contains v17 (the SIMD register with encoding 17) and
    480  //   the ignore set contains d17 (the double register with encoding 17) then
    481  //   no part of the physical register with encoding 17 will be restored.
    482  //   (This is probably not true on arm32, since that has aliased float32
    483  //   registers; but none of our other targets do.)
    484  //
    485  // * {Push,store}RegsInMask/storeRegsInMask are further constrained as
    486  //   follows: when given the argument AllFloatRegisters, the resulting
    487  //   memory area must contain exactly all the SIMD/FP registers for the
    488  //   target at their widest width (that we care about).  [We have no targets
    489  //   where the SIMD registers and FP register sets are disjoint.]  They must
    490  //   be packed end-to-end with no holes, with the register with the lowest
    491  //   encoding number (0), as returned by FloatRegister::encoding(), at the
    492  //   abovementioned highest address, register 1 just below that, etc.
    493  //
    494  //   Furthermore the sizeof(RegisterContent) must equal the size of a SIMD
    495  //   register in the abovementioned array.
    496  //
    497  //   Furthermore the value returned by
    498  //   FloatRegister::getRegisterDumpOffsetInBytes must be a correct index
    499  //   into the abovementioned array.  Given the constraints, the only correct
    500  //   value is `reg.encoding() * sizeof(RegisterContent)`.
    501  //
    502  // Note that some of the routines listed above are JS-only, and do not support
    503  // SIMD registers. They are otherwise part of the same equivalence class.
    504  // Register spilling for e.g. OOL VM calls is implemented using
    505  // PushRegsInMask, and recovered on bailout using machineState. This requires
    506  // the same layout to be used in machineState, and therefore in all other code
    507  // that can spill registers that are recovered on bailout. Implementations of
    508  // JitRuntime::generate{Invalidator,BailoutHandler} should either call
    509  // PushRegsInMask, or check carefully to be sure that they generate the same
    510  // layout.
    511 
    512  // The size of the area used by PushRegsInMask.
    513  static size_t PushRegsInMaskSizeInBytes(LiveRegisterSet set) PER_SHARED_ARCH;
    514 
    515  void PushRegsInMask(LiveRegisterSet set) PER_SHARED_ARCH;
    516  void PushRegsInMask(LiveGeneralRegisterSet set);
    517 
    518  // Like PushRegsInMask, but instead of pushing the registers, store them to
    519  // |dest|. |dest| should point to the end of the reserved space, so the
    520  // first register will be stored at |dest.offset - sizeof(register)|.  It is
    521  // required that |dest.offset| is at least as large as the value computed by
    522  // PushRegsInMaskSizeInBytes for this |set|.  In other words, |dest.base|
    523  // must point to either the lowest address in the save area, or some address
    524  // below that.
    525  void storeRegsInMask(LiveRegisterSet set, Address dest,
    526                       Register scratch) PER_SHARED_ARCH;
    527 
    528  void PopRegsInMask(LiveRegisterSet set);
    529  void PopRegsInMask(LiveGeneralRegisterSet set);
    530  void PopRegsInMaskIgnore(LiveRegisterSet set,
    531                           LiveRegisterSet ignore) PER_SHARED_ARCH;
    532 
    533  // ===============================================================
    534  // Stack manipulation functions -- single registers/values.
    535 
    536  void Push(const Operand op) DEFINED_ON(x86_shared);
    537  void Push(Register reg) PER_SHARED_ARCH;
    538  void Push(Register reg1, Register reg2, Register reg3, Register reg4)
    539      DEFINED_ON(arm64);
    540  void Push(const Imm32 imm) PER_SHARED_ARCH;
    541  void Push(const ImmWord imm) PER_SHARED_ARCH;
    542  void Push(const ImmPtr imm) PER_SHARED_ARCH;
    543  void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
    544  void Push(FloatRegister reg) PER_SHARED_ARCH;
    545  void PushBoxed(FloatRegister reg) PER_ARCH;
    546  void PushFlags() DEFINED_ON(x86_shared);
    547  void Push(PropertyKey key, Register scratchReg);
    548  void Push(const Address& addr);
    549  void Push(TypedOrValueRegister v);
    550  void Push(const ConstantOrRegister& v);
    551  void Push(const ValueOperand& val);
    552  void Push(const Value& val);
    553  void Push(JSValueType type, Register reg);
    554  void Push(const Register64 reg);
    555  void PushEmptyRooted(VMFunctionData::RootType rootType);
    556  inline CodeOffset PushWithPatch(ImmWord word);
    557  inline CodeOffset PushWithPatch(ImmPtr imm);
    558 
    559  using MacroAssemblerSpecific::push;
    560 
    561  void Pop(const Operand op) DEFINED_ON(x86_shared);
    562  void Pop(Register reg) PER_SHARED_ARCH;
    563  void Pop(FloatRegister t) PER_SHARED_ARCH;
    564  void Pop(const ValueOperand& val) PER_SHARED_ARCH;
    565  void Pop(const Register64 reg);
    566  void PopFlags() DEFINED_ON(x86_shared);
    567  void PopStackPtr()
    568      DEFINED_ON(arm, mips64, x86_shared, loong64, riscv64, wasm32);
    569 
    570  // Move the stack pointer based on the requested amount.
    571  void adjustStack(int amount);
    572  void freeStack(uint32_t amount);
    573 
    574  // Move the stack pointer to the specified position. It assumes the SP
    575  // register is not valid -- it uses FP to set the position.
    576  void freeStackTo(uint32_t framePushed) PER_SHARED_ARCH;
    577 
    578 private:
    579  // ===============================================================
    580  // Register allocation fields.
    581 #ifdef DEBUG
    582  friend AutoRegisterScope;
    583  friend AutoFloatRegisterScope;
    584  // Used to track register scopes for debug builds.
    585  // Manipulated by the AutoGenericRegisterScope class.
    586  AllocatableRegisterSet debugTrackedRegisters_;
    587 #endif  // DEBUG
    588 
    589 public:
    590  // ===============================================================
    591  // Simple call functions.
    592 
    593  // The returned CodeOffset is the assembler offset for the instruction
    594  // immediately following the call; that is, for the return point.
    595  CodeOffset call(Register reg) PER_SHARED_ARCH;
    596  CodeOffset call(Label* label) PER_SHARED_ARCH;
    597  CodeOffset call(const Address& addr) PER_SHARED_ARCH;
    598 
    599  void call(ImmWord imm) PER_SHARED_ARCH;
    600  // Call a target native function, which is neither traceable nor movable.
    601  void call(ImmPtr imm) PER_SHARED_ARCH;
    602  CodeOffset call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
    603  inline CodeOffset call(const wasm::CallSiteDesc& desc,
    604                         wasm::SymbolicAddress imm);
    605 
    606  // Call a target JitCode, which must be traceable, and may be movable.
    607  void call(JitCode* c) PER_SHARED_ARCH;
    608 
    609  inline void call(TrampolinePtr code);
    610 
    611  inline CodeOffset call(const wasm::CallSiteDesc& desc, const Register reg);
    612  inline CodeOffset call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex);
    613  inline void call(const wasm::CallSiteDesc& desc, wasm::Trap trap);
    614 
    615  CodeOffset callWithPatch() PER_SHARED_ARCH;
    616  void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;
    617 
    618  // Push the return address and make a call. On platforms where this function
    619  // is not defined, push the link register (pushReturnAddress) at the entry
    620  // point of the callee.
    621  void callAndPushReturnAddress(Register reg) DEFINED_ON(x86_shared);
    622  void callAndPushReturnAddress(Label* label) DEFINED_ON(x86_shared);
    623 
    624  // These do not adjust framePushed().
    625  void pushReturnAddress()
    626      DEFINED_ON(mips64, arm, arm64, loong64, riscv64, wasm32);
    627  void popReturnAddress()
    628      DEFINED_ON(mips64, arm, arm64, loong64, riscv64, wasm32);
    629 
    630  // Useful for dealing with two-valued returns.
    631  void moveRegPair(Register src0, Register src1, Register dst0, Register dst1,
    632                   MoveOp::Type type = MoveOp::GENERAL);
    633 
    634  void reserveVMFunctionOutParamSpace(const VMFunctionData& f);
    635  void loadVMFunctionOutParam(const VMFunctionData& f, const Address& addr);
    636 
    637 public:
  // ===============================================================
  // Patchable near/far jumps.

  // "Far jumps" provide the ability to jump to any uint32_t offset from any
  // other uint32_t offset without using a constant pool (thus returning a
  // simple CodeOffset instead of a CodeOffsetJump).
  CodeOffset farJumpWithPatch() PER_SHARED_ARCH;
  void patchFarJump(CodeOffset farJump, uint32_t targetOffset) PER_SHARED_ARCH;
  static void patchFarJump(uint8_t* farJump, uint8_t* target)
      DEFINED_ON(arm, arm64, x86_shared, loong64, mips64, riscv64);

  // Emit a nop that can be patched back and forth between a nop and a call
  // with int32 relative displacement.
  CodeOffset nopPatchableToCall() PER_SHARED_ARCH;
  void nopPatchableToCall(const wasm::CallSiteDesc& desc);
  static void patchNopToCall(uint8_t* callsite,
                             uint8_t* target) PER_SHARED_ARCH;
  static void patchCallToNop(uint8_t* callsite) PER_SHARED_ARCH;

  // These methods are like movWithPatch/PatchDataWithValueCheck but allow
  // using pc-relative addressing on certain platforms (RIP-relative LEA on x64,
  // ADR instruction on arm64).
  //
  // Note: "Near" applies to ARM64 where the target must be within 1 MB (this is
  // release-asserted).
  CodeOffset moveNearAddressWithPatch(Register dest) PER_ARCH;
  static void patchNearAddressMove(CodeLocationLabel loc,
                                   CodeLocationLabel target) PER_ARCH;

  // Creates a move of a patchable 32-bit value into `dest`.  On 64-bit
  // targets, the value (`n`) is extended to 64 bits using the target
  // architecture's standard 32-to-64 extension rule.  Hence consistent cross
  // target behaviour is only provided for `n` in the range 0 .. 2^31-1
  // inclusive.
  CodeOffset move32WithPatch(Register dest)
      DEFINED_ON(x86_shared, arm, arm64, loong64, mips64, riscv64);
  void patchMove32(CodeOffset offset, Imm32 n)
      DEFINED_ON(x86_shared, arm, arm64, loong64, mips64, riscv64);
    676 
 public:
  // ===============================================================
  // [SMDOC] JIT-to-C++ Function Calls (callWithABI)
  //
  // callWithABI is used to make a call using the standard C/C++ system ABI.
  //
  // callWithABI is a low level interface for making calls, as such every call
  // made with callWithABI should be organized with 6 steps: spilling live
  // registers, aligning the stack, listing arguments of the called function,
  // calling a function pointer, extracting the returned value and restoring
  // live registers.
  //
  // A more detailed example of the six stages:
  //
  // 1) Saving of registers that are live. This will vary depending on which
  //    SpiderMonkey compiler you are working on. Registers that shouldn't be
  //    restored can be excluded.
  //
  //      LiveRegisterSet volatileRegs(...);
  //      volatileRegs.take(scratch);
  //      masm.PushRegsInMask(volatileRegs);
  //
  // 2) Align the stack to perform the call with the correct stack alignment.
  //
  //    When the stack pointer alignment is unknown and cannot be corrected
  //    when generating the code, setupUnalignedABICall must be used to
  //    dynamically align the stack pointer to the expectation of the ABI.
  //    When the stack pointer is known at JIT compilation time, the stack can
  //    be fixed manually and setupAlignedABICall and setupWasmABICall can be
  //    used.
  //
  //    setupWasmABICall is a special case of setupAlignedABICall as
  //    SpiderMonkey's WebAssembly implementation mostly follows the system
  //    ABI, except for float/double arguments, which always use floating
  //    point registers, even if this is not supported by the system ABI.
  //
  //      masm.setupUnalignedABICall(scratch);
  //
  // 3) Passing arguments. Arguments are passed left-to-right.
  //
  //      masm.passABIArg(scratch);
  //      masm.passABIArg(FloatOp0, ABIType::Float64);
  //
  //    Note how float register arguments are annotated with ABIType::Float64.
  //
  //    Concerning stack-relative address, see the note on passABIArg.
  //
  // 4) Make the call:
  //
  //      using Fn = int32_t (*)(int32_t)
  //      masm.callWithABI<Fn, Callee>();
  //
  //    In the case where the call returns a double, that needs to be
  //    indicated to the callWithABI like this:
  //
  //      using Fn = double (*)(int32_t)
  //      masm.callWithABI<Fn, Callee>(ABIType::Float64);
  //
  //    There are overloads to allow calls to registers and addresses.
  //
  // 5) Take care of the result
  //
  //      masm.storeCallPointerResult(scratch1);
  //      masm.storeCallBoolResult(scratch1);
  //      masm.storeCallInt32Result(scratch1);
  //      masm.storeCallFloatResult(scratch1);
  //
  // 6) Restore the potentially clobbered volatile registers
  //
  //      masm.PopRegsInMask(volatileRegs);
  //
  //    If expecting a returned value, this call should use
  //    PopRegsInMaskIgnore to filter out the registers which are containing
  //    the returned value.
  //
  // Unless an exit frame is pushed prior to the setupABICall, the callee
  // should not GC. To ensure this is the case callWithABI is instrumented to
  // make sure that in the default case callees are annotated with an
  // AutoUnsafeCallWithABI on the stack.
  //
  // A callWithABI can opt out of checking, if for example it is known there
  // is an exit frame, or the callee is known not to GC.
  //
  // If your callee needs to be able to GC, consider using a VMFunction, or
  // create a fake exit frame, and instrument the TraceJitExitFrame
  // accordingly.

  // Setup a call to C/C++ code, given the assumption that the framePushed
  // accurately defines the state of the stack, and that the top of the stack
  // was properly aligned. Note that this only supports cdecl.
  //
  // As a rule of thumb, this can be used in CodeGenerator but not in CacheIR or
  // Baseline code (because the stack is not aligned to ABIStackAlignment).
  void setupAlignedABICall();

  // As setupAlignedABICall, but for WebAssembly native ABI calls, which pass
  // through a builtin thunk that uses the wasm ABI. All the wasm ABI calls
  // can be native, since we always know the stack alignment a priori.
  void setupWasmABICall(wasm::SymbolicAddress builtin);

  // Setup an ABI call for when the alignment is not known. This may need a
  // scratch register.
  void setupUnalignedABICall(Register scratch) PER_ARCH;

  // Like setupUnalignedABICall, but more efficient because it doesn't push/pop
  // the unaligned stack pointer. The caller is responsible for restoring SP
  // after the callWithABI, for example using the frame pointer register.
  void setupUnalignedABICallDontSaveRestoreSP();

  // Arguments must be assigned to a C/C++ call in order. They are moved
  // in parallel immediately before performing the call. This process may
  // temporarily use more stack, in which case esp-relative addresses will be
  // automatically adjusted. It is extremely important that esp-relative
  // addresses are computed *after* setupABICall(). Furthermore, no
  // operations should be emitted while setting arguments.
  void passABIArg(const MoveOperand& from, ABIType type);
  inline void passABIArg(Register reg);
  void passABIArg(Register64 reg);
  inline void passABIArg(FloatRegister reg, ABIType type);

  inline void callWithABI(
      DynFn fun, ABIType result = ABIType::General,
      CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check);
  template <typename Sig, Sig fun>
  inline void callWithABI(
      ABIType result = ABIType::General,
      CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check);
  inline void callWithABI(Register fun, ABIType result = ABIType::General);
  inline void callWithABI(const Address& fun,
                          ABIType result = ABIType::General);

  CodeOffset callWithABI(wasm::BytecodeOffset offset, wasm::SymbolicAddress fun,
                         mozilla::Maybe<int32_t> instanceOffset,
                         ABIType result = ABIType::General);
  void callDebugWithABI(wasm::SymbolicAddress fun,
                        ABIType result = ABIType::General);
    813 
 private:
  // Reinitialize the variables which have to be cleared before making a call
  // with callWithABI.
  void setupABICallHelper(ABIKind kind);

  // Reinitialize the variables which have to be cleared before making a call
  // with the native ABI.
  void setupNativeABICall();

  // Reserve the stack and resolve the arguments move.
  void callWithABIPre(uint32_t* stackAdjust,
                      bool callFromWasm = false) PER_ARCH;

  // Emits a call to a C/C++ function, resolving all argument moves.
  void callWithABINoProfiler(void* fun, ABIType result,
                             CheckUnsafeCallWithABI check);
  void callWithABINoProfiler(Register fun, ABIType result) PER_ARCH;
  void callWithABINoProfiler(const Address& fun, ABIType result) PER_ARCH;

  // Restore the stack to its state before the setup function call.
  void callWithABIPost(uint32_t stackAdjust, ABIType result) PER_ARCH;

#ifdef JS_CHECK_UNSAFE_CALL_WITH_ABI
  // Set the JSContext::inUnsafeCallWithABI flag using InstanceReg.
  void wasmCheckUnsafeCallWithABIPre();
  // Check JSContext::inUnsafeCallWithABI was cleared as expected.
  void wasmCheckUnsafeCallWithABIPost();
#endif

  // Create the signature to be able to decode the arguments of a native
  // function, when calling a function within the simulator.
  inline void appendSignatureType(ABIType type);
  inline ABIFunctionType signature() const;

  // Private variables used to handle moves between registers given as
  // arguments to passABIArg and the list of ABI registers expected for the
  // signature of the function.
  MoveResolver moveResolver_;

  // Architecture specific implementation which specifies how registers & stack
  // offsets are used for calling a function.
  ABIArgGenerator abiArgs_;

#ifdef DEBUG
  // Flag used to assert that the ABI-call functions are used in the right
  // context (set while an ABI call is being set up).
  bool inCall_;
#endif

  // If set by setupUnalignedABICall then callWithABI will pop the original
  // stack pointer which was saved on the stack.
  bool dynamicAlignment_;

#ifdef JS_SIMULATOR
  // The signature is used to accumulate all types of arguments which are used
  // by the caller. This is used by the simulators to decode the arguments
  // properly, and cast the function pointer to the right type.
  uint32_t signature_;
#endif
    872 
 public:
  // ===============================================================
  // Jit Frames.
  //
  // These functions are used to build the content of the Jit frames.  See
  // CommonFrameLayout class, and all its derivatives. The content should be
  // pushed in the opposite order as the fields of the structures, such that
  // the structures can be used to interpret the content of the stack.

  // Call the Jit function, and push the return address (or let the callee
  // push the return address).
  //
  // These functions return the offset of the return address, in order to use
  // the return address to index the safepoints, which are used to list all
  // live registers.
  inline uint32_t callJitNoProfiler(Register callee);
  inline uint32_t callJit(Register callee);
  inline uint32_t callJit(JitCode* code);
  inline uint32_t callJit(TrampolinePtr code);
  inline uint32_t callJit(ImmPtr callee);

  // The frame descriptor is the second field of all Jit frames, pushed before
  // calling the Jit function. See CommonFrameLayout::descriptor_.
  inline void push(FrameDescriptor descriptor);
  inline void Push(FrameDescriptor descriptor);

  // For JitFrameLayout, the descriptor also stores the number of arguments
  // passed by the caller. See MakeFrameDescriptorForJitCall.
  inline void pushFrameDescriptorForJitCall(FrameType type, Register argc,
                                            Register scratch,
                                            bool hasInlineICScript = false);
  inline void PushFrameDescriptorForJitCall(FrameType type, Register argc,
                                            Register scratch,
                                            bool hasInlineICScript = false);
  inline void makeFrameDescriptorForJitCall(FrameType type, Register argc,
                                            Register dest,
                                            bool hasInlineICScript = false);

  // Load the number of actual arguments from the frame's JitFrameLayout.
  inline void loadNumActualArgs(Register framePtr, Register dest);

  // Push the callee token of the JSFunction whose pointer is stored in the
  // |callee| register. The callee token is packed with a |constructing| flag
  // which indicates whether the JS function is called with "new" or not.
  inline void PushCalleeToken(Register callee, bool constructing);

  // Unpack a callee token located at the |token| address, and return the
  // JSFunction pointer in the |dest| register.
  inline void loadFunctionFromCalleeToken(Address token, Register dest);

  // This function emulates a call by pushing an exit frame on the stack,
  // except that the fake-function is inlined within the body of the caller.
  //
  // This function assumes that the current frame is an IonJS frame.
  //
  // This function returns the offset of the /fake/ return address, in order to
  // use the return address to index the safepoints, which are used to list all
  // live registers.
  //
  // This function should be balanced with a call to adjustStack, to pop the
  // exit frame and emulate the return statement of the inlined function.
  inline uint32_t buildFakeExitFrame(Register scratch);

 private:
  // This function is used by buildFakeExitFrame to push a fake return address
  // on the stack. This fake return address should never be used for resuming
  // any execution, and can even be an invalid pointer into the instruction
  // stream, as long as it does not alias any other.
  uint32_t pushFakeReturnAddress(Register scratch) PER_SHARED_ARCH;
    943 
 public:
  // ===============================================================
  // Exit frame footer.
  //
  // When calling outside the Jit we push an exit frame. To mark the stack
  // correctly, we have to push additional information, called the Exit frame
  // footer, which is used to identify how the stack is marked.
  //
  // See JitFrames.h, and TraceJitExitFrame in JitFrames.cpp.

  // Links the exit frame and pushes the ExitFooterFrame.
  inline void enterExitFrame(Register cxreg, Register scratch, VMFunctionId f);

  // Push an exit frame token to identify which fake exit frame this footer
  // corresponds to.
  inline void enterFakeExitFrame(Register cxreg, Register scratch,
                                 ExitFrameType type);

  // Push an exit frame token for a native call.
  inline void enterFakeExitFrameForNative(Register cxreg, Register scratch,
                                          bool isConstructing);

  // Pop ExitFrame footer in addition to the extra frame.
  inline void leaveExitFrame(size_t extraFrame = 0);

 private:
  // Save the top of the stack into JitActivation::packedExitFP of the
  // current thread, which should be the location of the latest exit frame.
  void linkExitFrame(Register cxreg, Register scratch);

 public:
  // ===============================================================
  // Move instructions

  inline void move64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void move64(Register64 src, Register64 dest) PER_ARCH;

  inline void moveFloat16ToGPR(FloatRegister src,
                               Register dest) PER_SHARED_ARCH;
  // Clears the high words of `src`.
  // NOTE(review): the comment above likely means the bits above the low 16
  // bits of the destination are cleared — confirm against the per-arch
  // implementations.
  inline void moveGPRToFloat16(Register src,
                               FloatRegister dest) PER_SHARED_ARCH;

  inline void moveFloat32ToGPR(FloatRegister src,
                               Register dest) PER_SHARED_ARCH;
  inline void moveGPRToFloat32(Register src,
                               FloatRegister dest) PER_SHARED_ARCH;

  inline void moveDoubleToGPR64(FloatRegister src, Register64 dest) PER_ARCH;
  inline void moveGPR64ToDouble(Register64 src, FloatRegister dest) PER_ARCH;

  // Move the low 32-bits of a double.
  inline void moveLowDoubleToGPR(FloatRegister src,
                                 Register dest) PER_SHARED_ARCH;

  inline void move8ZeroExtend(Register src, Register dest) PER_SHARED_ARCH;

  inline void move8SignExtend(Register src, Register dest) PER_SHARED_ARCH;
  inline void move16SignExtend(Register src, Register dest) PER_SHARED_ARCH;

  // move64To32 will clear the high bits of `dest` on 64-bit systems.
  inline void move64To32(Register64 src, Register dest) PER_ARCH;

  inline void move32To64ZeroExtend(Register src, Register64 dest) PER_ARCH;

  inline void move8To64SignExtend(Register src, Register64 dest) PER_ARCH;
  inline void move16To64SignExtend(Register src, Register64 dest) PER_ARCH;
  inline void move32To64SignExtend(Register src, Register64 dest) PER_ARCH;

  inline void move8SignExtendToPtr(Register src, Register dest) PER_ARCH;
  inline void move16SignExtendToPtr(Register src, Register dest) PER_ARCH;
  inline void move32SignExtendToPtr(Register src, Register dest) PER_ARCH;

  inline void move32ZeroExtendToPtr(Register src, Register dest) PER_ARCH;

  // Copy a constant, typed-register, or a ValueOperand into a ValueOperand
  // destination.
  inline void moveValue(const ConstantOrRegister& src,
                        const ValueOperand& dest);
  void moveValue(const TypedOrValueRegister& src, const ValueOperand& dest);
  void moveValue(const ValueOperand& src, const ValueOperand& dest) PER_ARCH;
  void moveValue(const Value& src, const ValueOperand& dest) PER_ARCH;

  void movePropertyKey(PropertyKey key, Register dest);
   1028 
  // ===============================================================
  // Load instructions

  inline void load32SignExtendToPtr(const Address& src, Register dest) PER_ARCH;

  inline void loadAbiReturnAddress(Register dest) PER_SHARED_ARCH;

  // ===============================================================
  // Copy instructions

  inline void copy64(const Address& src, const Address& dest, Register scratch);
   1040 
 public:
  // ===============================================================
  // Logical instructions

  inline void not32(Register reg) PER_SHARED_ARCH;
  inline void notPtr(Register reg) PER_ARCH;

  inline void and32(Register src, Register dest) PER_SHARED_ARCH;
  inline void and32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void and32(Imm32 imm, Register src, Register dest) PER_SHARED_ARCH;
  inline void and32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
  inline void and32(const Address& src, Register dest) PER_SHARED_ARCH;

  inline void andPtr(Register src, Register dest) PER_ARCH;
  inline void andPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void andPtr(Imm32 imm, Register src, Register dest) PER_ARCH;

  inline void and64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void or64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void xor64(Imm64 imm, Register64 dest) PER_ARCH;

  inline void or32(Register src, Register dest) PER_SHARED_ARCH;
  inline void or32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void or32(Imm32 imm, Register src, Register dest) PER_SHARED_ARCH;
  inline void or32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;

  inline void orPtr(Register src, Register dest) PER_ARCH;
  inline void orPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void orPtr(Imm32 imm, Register src, Register dest) PER_ARCH;

  inline void and64(Register64 src, Register64 dest) PER_ARCH;
  inline void or64(Register64 src, Register64 dest) PER_ARCH;
  inline void xor64(Register64 src, Register64 dest) PER_ARCH;

  inline void xor32(Register src, Register dest) PER_SHARED_ARCH;
  inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void xor32(Imm32 imm, Register src, Register dest) PER_SHARED_ARCH;
  inline void xor32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
  inline void xor32(const Address& src, Register dest) PER_SHARED_ARCH;

  inline void xorPtr(Register src, Register dest) PER_ARCH;
  inline void xorPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void xorPtr(Imm32 imm, Register src, Register dest) PER_ARCH;

  inline void and64(const Operand& src, Register64 dest) DEFINED_ON(x64);
  inline void or64(const Operand& src, Register64 dest) DEFINED_ON(x64);
  inline void xor64(const Operand& src, Register64 dest) DEFINED_ON(x64);

  // ===============================================================
  // Swap instructions

  // Swap the two lower bytes and sign extend the result to 32-bit.
  inline void byteSwap16SignExtend(Register reg) PER_SHARED_ARCH;

  // Swap the two lower bytes and zero extend the result to 32-bit.
  inline void byteSwap16ZeroExtend(Register reg) PER_SHARED_ARCH;

  // Swap all four bytes in a 32-bit integer.
  inline void byteSwap32(Register reg) PER_SHARED_ARCH;

  // Swap all eight bytes in a 64-bit integer.
  inline void byteSwap64(Register64 reg) PER_ARCH;
   1103 
  // ===============================================================
  // Arithmetic functions

  // Condition flags aren't guaranteed to be set by these functions, for example
  // x86 will always set condition flags, but ARM64 won't do it unless
  // explicitly requested. Instead use branch(Add|Sub|Mul|Neg) to test for
  // condition flags after performing arithmetic operations.

  inline void add32(const Address& src, Register dest) PER_SHARED_ARCH;
  inline void add32(Register src, Register dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, Register dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, Register src, Register dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
  inline void add32(Imm32 imm, const AbsoluteAddress& dest)
      DEFINED_ON(x86_shared);

  inline void addPtr(Register src, Register dest) PER_ARCH;
  inline void addPtr(Register src1, Register src2, Register dest)
      DEFINED_ON(arm64);
  inline void addPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void addPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
  inline void addPtr(ImmWord imm, Register dest) PER_ARCH;
  inline void addPtr(ImmPtr imm, Register dest);
  inline void addPtr(Imm32 imm, const Address& dest) PER_ARCH;
  inline void addPtr(Imm32 imm, const AbsoluteAddress& dest)
      DEFINED_ON(x86, x64);
  inline void addPtr(const Address& src, Register dest) PER_ARCH;

  inline void add64(Register64 src, Register64 dest) PER_ARCH;
  inline void add64(Imm32 imm, Register64 dest) PER_ARCH;
  inline void add64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void add64(const Operand& src, Register64 dest) DEFINED_ON(x64);

  inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  // Compute dest=SP-imm where dest is a pointer register and not SP.  The
  // offset returned from sub32FromStackPtrWithPatch() must be passed to
  // patchSub32FromStackPtr().
  inline CodeOffset sub32FromStackPtrWithPatch(Register dest) PER_ARCH;
  inline void patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) PER_ARCH;

  inline void addDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86);

  inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
  inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
  inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH;

  inline void subPtr(Register src, Register dest) PER_ARCH;
  inline void subPtr(Register src, const Address& dest) PER_ARCH;
  inline void subPtr(Imm32 imm, Register dest) PER_ARCH;
  inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x86, x64);
  inline void subPtr(const Address& addr, Register dest) PER_ARCH;

  inline void sub64(Register64 src, Register64 dest) PER_ARCH;
  inline void sub64(Imm64 imm, Register64 dest) PER_ARCH;
  inline void sub64(const Operand& src, Register64 dest) DEFINED_ON(x64);

  inline void subFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  inline void mul32(Register rhs, Register srcDest) PER_SHARED_ARCH;
  inline void mul32(Imm32 imm, Register srcDest) PER_SHARED_ARCH;

  // NOTE(review): |onOver| is presumably taken on multiplication overflow —
  // confirm against the arm64 implementation.
  inline void mul32(Register src1, Register src2, Register dest, Label* onOver)
      DEFINED_ON(arm64);

  // Return the high word of the unsigned multiplication into |dest|.
  inline void mulHighUnsigned32(Imm32 imm, Register src,
                                Register dest) PER_ARCH;

  inline void mulPtr(Register rhs, Register srcDest) PER_ARCH;
  inline void mulPtr(ImmWord rhs, Register srcDest) PER_ARCH;

  inline void mul64(const Operand& src, const Register64& dest) DEFINED_ON(x64);
  inline void mul64(const Operand& src, const Register64& dest,
                    const Register temp) DEFINED_ON(x64);
  inline void mul64(Imm64 imm, const Register64& dest) PER_ARCH;
  inline void mul64(Imm64 imm, const Register64& dest, const Register temp)
      DEFINED_ON(x86, x64, arm, mips64, loong64, riscv64);
  inline void mul64(const Register64& src, const Register64& dest,
                    const Register temp) PER_ARCH;
  inline void mul64(const Register64& src1, const Register64& src2,
                    const Register64& dest) DEFINED_ON(arm64);
  inline void mul64(Imm64 src1, const Register64& src2, const Register64& dest)
      DEFINED_ON(arm64);

  inline void mulBy3(Register src, Register dest) PER_ARCH;

  inline void mulFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void mulDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

  inline void mulDoublePtr(ImmPtr imm, Register temp,
                           FloatRegister dest) PER_ARCH;
   1199 
  // Perform an integer division, returning the integer part rounded toward
  // zero. rhs must not be zero, and the division must not overflow.
  //
  // On ARM, the chip must have hardware division instructions.
  inline void quotient32(Register lhs, Register rhs, Register dest,
                         bool isUnsigned)
      DEFINED_ON(mips64, arm, arm64, loong64, riscv64, wasm32);

  inline void quotient64(Register lhs, Register rhs, Register dest,
                         bool isUnsigned)
      DEFINED_ON(arm64, loong64, mips64, riscv64);

  // As above, but lhs and dest must be eax and tempEdx must be edx.
  inline void quotient32(Register lhs, Register rhs, Register dest,
                         Register tempEdx, bool isUnsigned)
      DEFINED_ON(x86_shared);

  // Perform an integer division, returning the remainder part.
  // rhs must not be zero, and the division must not overflow.
  //
  // On ARM, the chip must have hardware division instructions.
  inline void remainder32(Register lhs, Register rhs, Register dest,
                          bool isUnsigned)
      DEFINED_ON(mips64, arm, arm64, loong64, riscv64, wasm32);

  inline void remainder64(Register lhs, Register rhs, Register dest,
                          bool isUnsigned)
      DEFINED_ON(arm64, loong64, mips64, riscv64);

  // As above, but lhs and dest must be eax and tempEdx must be edx.
  inline void remainder32(Register lhs, Register rhs, Register dest,
                          Register tempEdx, bool isUnsigned)
      DEFINED_ON(x86_shared);

  // Perform an integer division, returning the remainder part.
  // rhs must not be zero, and the division must not overflow.
  //
  // This variant preserves registers, and doesn't require hardware division
  // instructions on ARM (will call out to a runtime routine).
  void flexibleRemainder32(
      Register lhs, Register rhs, Register dest, bool isUnsigned,
      const LiveRegisterSet& volatileLiveRegs) PER_SHARED_ARCH;
  void flexibleRemainderPtr(Register lhs, Register rhs, Register dest,
                            bool isUnsigned,
                            const LiveRegisterSet& volatileLiveRegs) PER_ARCH;

  // Perform an integer division, returning the integer part rounded toward
  // zero. rhs must not be zero, and the division must not overflow.
  //
  // This variant preserves registers, and doesn't require hardware division
  // instructions on ARM (will call out to a runtime routine).
  void flexibleQuotient32(
      Register lhs, Register rhs, Register dest, bool isUnsigned,
      const LiveRegisterSet& volatileLiveRegs) PER_SHARED_ARCH;
  void flexibleQuotientPtr(Register lhs, Register rhs, Register dest,
                           bool isUnsigned,
                           const LiveRegisterSet& volatileLiveRegs) PER_ARCH;

  // Perform an integer division, returning the integer part rounded toward
  // zero in the third argument register. rhs must not be zero, and the division
  // must not overflow. The remainder is stored into the fourth argument
  // register here.
  //
  // This variant preserves registers, and doesn't require hardware division
  // instructions on ARM (will call out to a runtime routine).
  //
  // lhs and rhs are preserved, divOutput and remOutput are clobbered.
  void flexibleDivMod32(
      Register lhs, Register rhs, Register divOutput, Register remOutput,
      bool isUnsigned, const LiveRegisterSet& volatileLiveRegs) PER_SHARED_ARCH;

  inline void divFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
  inline void divDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
   1273 
   1274  inline void inc64(AbsoluteAddress dest) PER_ARCH;
   1275 
   1276  inline void neg32(Register reg) PER_SHARED_ARCH;
   1277  inline void neg64(Register64 reg) PER_ARCH;
   1278  inline void negPtr(Register reg) PER_ARCH;
   1279 
   1280  inline void negateFloat(FloatRegister reg) PER_SHARED_ARCH;
   1281 
   1282  inline void negateDouble(FloatRegister reg) PER_SHARED_ARCH;
   1283 
   1284  inline void abs32(Register src, Register dest) PER_SHARED_ARCH;
   1285  inline void absFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
   1286  inline void absDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
   1287 
   1288  inline void sqrtFloat32(FloatRegister src,
   1289                          FloatRegister dest) PER_SHARED_ARCH;
   1290  inline void sqrtDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
   1291 
  // Rounding conversions from float32/double to int32. Each jumps to |fail|
  // when the conversion cannot be performed (NOTE(review): presumably when the
  // rounded result is not exactly representable as an int32 — confirm against
  // the per-arch definitions).
  void floorFloat32ToInt32(FloatRegister src, Register dest,
                           Label* fail) PER_SHARED_ARCH;
  void floorDoubleToInt32(FloatRegister src, Register dest,
                          Label* fail) PER_SHARED_ARCH;

  void ceilFloat32ToInt32(FloatRegister src, Register dest,
                          Label* fail) PER_SHARED_ARCH;
  void ceilDoubleToInt32(FloatRegister src, Register dest,
                         Label* fail) PER_SHARED_ARCH;

  // The round variants additionally require a float temporary.
  void roundFloat32ToInt32(FloatRegister src, Register dest, FloatRegister temp,
                           Label* fail) PER_SHARED_ARCH;
  void roundDoubleToInt32(FloatRegister src, Register dest, FloatRegister temp,
                          Label* fail) PER_SHARED_ARCH;

  void truncFloat32ToInt32(FloatRegister src, Register dest,
                           Label* fail) PER_SHARED_ARCH;
  void truncDoubleToInt32(FloatRegister src, Register dest,
                          Label* fail) PER_SHARED_ARCH;

  // Round to a float of the same width using the given RoundingMode.
  void nearbyIntDouble(RoundingMode mode, FloatRegister src,
                       FloatRegister dest) PER_SHARED_ARCH;
  void nearbyIntFloat32(RoundingMode mode, FloatRegister src,
                        FloatRegister dest) PER_SHARED_ARCH;

  // Math.sign semantics; the ToInt32 variant jumps to |fail| when the result
  // cannot be represented as an int32.
  void signInt32(Register input, Register output);
  void signDouble(FloatRegister input, FloatRegister output);
  void signDoubleToInt32(FloatRegister input, Register output,
                         FloatRegister temp, Label* fail);

  // output = copysign(lhs, rhs) style sign transfer (see the per-arch
  // definitions for the exact operand roles).
  void copySignDouble(FloatRegister lhs, FloatRegister rhs,
                      FloatRegister output) PER_SHARED_ARCH;
  void copySignFloat32(FloatRegister lhs, FloatRegister rhs,
                       FloatRegister output) PER_SHARED_ARCH;

  // Returns a random double in range [0, 1) in |dest|. The |rng| register must
  // hold a pointer to a mozilla::non_crypto::XorShift128PlusRNG.
  void randomDouble(Register rng, FloatRegister dest, Register64 temp0,
                    Register64 temp1);
   1331 
  // result = min/max(lhs, rhs) for 32-bit and pointer-sized integers.
  inline void min32(Register lhs, Register rhs, Register result) PER_ARCH;
  inline void min32(Register lhs, Imm32 rhs, Register result) PER_ARCH;

  inline void max32(Register lhs, Register rhs, Register result) PER_ARCH;
  inline void max32(Register lhs, Imm32 rhs, Register result) PER_ARCH;

  inline void minPtr(Register lhs, Register rhs, Register result) PER_ARCH;
  inline void minPtr(Register lhs, ImmWord rhs, Register result) PER_ARCH;

  inline void maxPtr(Register lhs, Register rhs, Register result) PER_ARCH;
  inline void maxPtr(Register lhs, ImmWord rhs, Register result) PER_ARCH;

  // srcDest = {min,max}{Float32,Double}(srcDest, other)
  // For min and max, handle NaN specially if handleNaN is true.

  inline void minFloat32(FloatRegister other, FloatRegister srcDest,
                         bool handleNaN) PER_SHARED_ARCH;
  inline void minDouble(FloatRegister other, FloatRegister srcDest,
                        bool handleNaN) PER_SHARED_ARCH;

  inline void maxFloat32(FloatRegister other, FloatRegister srcDest,
                         bool handleNaN) PER_SHARED_ARCH;
  inline void maxDouble(FloatRegister other, FloatRegister srcDest,
                        bool handleNaN) PER_SHARED_ARCH;

  // Compute the min/max of the elements of |array|, jumping to |fail| on
  // failure (see the definitions for the exact failure conditions).
  void minMaxArrayInt32(Register array, Register result, Register temp1,
                        Register temp2, Register temp3, bool isMax,
                        Label* fail);
  void minMaxArrayNumber(Register array, FloatRegister result,
                         FloatRegister floatTemp, Register temp1,
                         Register temp2, bool isMax, Label* fail);

  // Compute |pow(base, power)| and store the result in |dest|. If the result
  // exceeds the int32 range, jumps to |onOver|.
  // |base| and |power| are preserved, the other input registers are clobbered.
  void pow32(Register base, Register power, Register dest, Register temp1,
             Register temp2, Label* onOver);
  void powPtr(Register base, Register power, Register dest, Register temp1,
              Register temp2, Label* onOver);

  // Inline implementation of Math.round.
  void roundFloat32(FloatRegister src, FloatRegister dest);
  void roundDouble(FloatRegister src, FloatRegister dest);

  // dest := boolean result of the SameValue(left, right) comparison.
  void sameValueDouble(FloatRegister left, FloatRegister right,
                       FloatRegister temp, Register dest);

  // RegExp helpers; |notFoundZeroLastIndex| / |unparsed| are taken-on-miss
  // labels (see the definitions for the exact conditions).
  void loadRegExpLastIndex(Register regexp, Register string, Register lastIndex,
                           Label* notFoundZeroLastIndex);

  void loadAndClearRegExpSearcherLastLimit(Register result, Register scratch);

  void loadParsedRegExpShared(Register regexp, Register result,
                              Label* unparsed);
   1386 
   1387  // ===============================================================
   1388  // Shift functions
   1389 
   1390  // For shift-by-register there may be platform-specific variations, for
   1391  // example, x86 will perform the shift mod 32 but ARM will perform the shift
   1392  // mod 256.
   1393  //
   1394  // For shift-by-immediate the platform assembler may restrict the immediate,
   1395  // for example, the ARM assembler requires the count for 32-bit shifts to be
   1396  // in the range [0,31].
   1397 
   1398  inline void lshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
   1399  inline void lshift32(Imm32 shift, Register src,
   1400                       Register dest) PER_SHARED_ARCH;
   1401  inline void rshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
   1402  inline void rshift32(Imm32 shift, Register src,
   1403                       Register dest) PER_SHARED_ARCH;
   1404  inline void rshift32Arithmetic(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
   1405  inline void rshift32Arithmetic(Imm32 shift, Register src,
   1406                                 Register dest) PER_SHARED_ARCH;
   1407 
   1408  inline void lshiftPtr(Imm32 imm, Register dest) PER_ARCH;
   1409  inline void lshiftPtr(Imm32 imm, Register src, Register dest) PER_ARCH;
   1410  inline void rshiftPtr(Imm32 imm, Register dest) PER_ARCH;
   1411  inline void rshiftPtr(Imm32 imm, Register src, Register dest) PER_ARCH;
   1412  inline void rshiftPtrArithmetic(Imm32 imm, Register dest) PER_ARCH;
   1413  inline void rshiftPtrArithmetic(Imm32 imm, Register src,
   1414                                  Register dest) PER_ARCH;
   1415 
   1416  inline void lshift64(Imm32 imm, Register64 dest) PER_ARCH;
   1417  inline void rshift64(Imm32 imm, Register64 dest) PER_ARCH;
   1418  inline void rshift64Arithmetic(Imm32 imm, Register64 dest) PER_ARCH;
   1419 
   1420  // On x86_shared these have the constraint that shift must be in CL.
   1421  inline void lshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
   1422  inline void rshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
   1423  inline void rshift32Arithmetic(Register shift,
   1424                                 Register srcDest) PER_SHARED_ARCH;
   1425  inline void lshiftPtr(Register shift, Register srcDest) PER_ARCH;
   1426  inline void rshiftPtr(Register shift, Register srcDest) PER_ARCH;
   1427  inline void rshiftPtrArithmetic(Register shift, Register srcDest) PER_ARCH;
   1428 
   1429  // These variants do not have the above constraint, but may emit some extra
   1430  // instructions on x86_shared. They also handle shift >= 32 consistently by
   1431  // masking with 0x1F (either explicitly or relying on the hardware to do
   1432  // that).
   1433  inline void flexibleLshift32(Register shift,
   1434                               Register srcDest) PER_SHARED_ARCH;
   1435  inline void flexibleRshift32(Register shift,
   1436                               Register srcDest) PER_SHARED_ARCH;
   1437  inline void flexibleRshift32Arithmetic(Register shift,
   1438                                         Register srcDest) PER_SHARED_ARCH;
   1439  inline void flexibleLshiftPtr(Register shift, Register srcDest) PER_ARCH;
   1440  inline void flexibleRshiftPtr(Register shift, Register srcDest) PER_ARCH;
   1441  inline void flexibleRshiftPtrArithmetic(Register shift,
   1442                                          Register srcDest) PER_ARCH;
   1443 
   1444  inline void lshift64(Register shift, Register64 srcDest) PER_ARCH;
   1445  inline void rshift64(Register shift, Register64 srcDest) PER_ARCH;
   1446  inline void rshift64Arithmetic(Register shift, Register64 srcDest) PER_ARCH;
   1447 
   1448  // ===============================================================
   1449  // Rotation functions
   1450  // Note: - on x86 and x64 the count register must be in CL.
   1451  //       - on x64 the temp register should be InvalidReg.
   1452 
   1453  inline void rotateLeft(Imm32 count, Register input,
   1454                         Register dest) PER_SHARED_ARCH;
   1455  inline void rotateLeft(Register count, Register input,
   1456                         Register dest) PER_SHARED_ARCH;
   1457  inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest)
   1458      DEFINED_ON(x64);
   1459  inline void rotateLeft64(Register count, Register64 input, Register64 dest)
   1460      DEFINED_ON(x64);
   1461  inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest,
   1462                           Register temp) PER_ARCH;
   1463  inline void rotateLeft64(Register count, Register64 input, Register64 dest,
   1464                           Register temp) PER_ARCH;
   1465 
   1466  inline void rotateRight(Imm32 count, Register input,
   1467                          Register dest) PER_SHARED_ARCH;
   1468  inline void rotateRight(Register count, Register input,
   1469                          Register dest) PER_SHARED_ARCH;
   1470  inline void rotateRight64(Imm32 count, Register64 input, Register64 dest)
   1471      DEFINED_ON(x64);
   1472  inline void rotateRight64(Register count, Register64 input, Register64 dest)
   1473      DEFINED_ON(x64);
   1474  inline void rotateRight64(Imm32 count, Register64 input, Register64 dest,
   1475                            Register temp) PER_ARCH;
   1476  inline void rotateRight64(Register count, Register64 input, Register64 dest,
   1477                            Register temp) PER_ARCH;
   1478 
   1479  // ===============================================================
   1480  // Bit counting functions
   1481 
   1482  // knownNotZero may be true only if the src is known not to be zero.
   1483  inline void clz32(Register src, Register dest,
   1484                    bool knownNotZero) PER_SHARED_ARCH;
   1485  inline void ctz32(Register src, Register dest,
   1486                    bool knownNotZero) PER_SHARED_ARCH;
   1487 
   1488  inline void clz64(Register64 src, Register64 dest) PER_ARCH;
   1489  inline void ctz64(Register64 src, Register64 dest) PER_ARCH;
   1490 
   1491  // On x86_shared, temp may be Invalid only if the chip has the POPCNT
   1492  // instruction. On ARM, temp may never be Invalid.
   1493  inline void popcnt32(Register src, Register dest,
   1494                       Register temp) PER_SHARED_ARCH;
   1495 
   1496  // temp may be invalid only if the chip has the POPCNT instruction.
   1497  inline void popcnt64(Register64 src, Register64 dest, Register temp) PER_ARCH;
   1498 
   1499  // ===============================================================
   1500  // Condition functions
   1501 
   1502  inline void cmp8Set(Condition cond, Address lhs, Imm32 rhs,
   1503                      Register dest) PER_SHARED_ARCH;
   1504 
   1505  inline void cmp16Set(Condition cond, Address lhs, Imm32 rhs,
   1506                       Register dest) PER_SHARED_ARCH;
   1507 
   1508  template <typename T1, typename T2>
   1509  inline void cmp32Set(Condition cond, T1 lhs, T2 rhs,
   1510                       Register dest) PER_SHARED_ARCH;
   1511 
   1512  inline void cmp64Set(Condition cond, Register64 lhs, Register64 rhs,
   1513                       Register dest) PER_ARCH;
   1514 
   1515  inline void cmp64Set(Condition cond, Register64 lhs, Imm64 rhs,
   1516                       Register dest) PER_ARCH;
   1517 
   1518  inline void cmp64Set(Condition cond, Address lhs, Register64 rhs,
   1519                       Register dest) PER_ARCH;
   1520 
   1521  inline void cmp64Set(Condition cond, Address lhs, Imm64 rhs,
   1522                       Register dest) PER_ARCH;
   1523 
   1524  template <typename T1, typename T2>
   1525  inline void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) PER_ARCH;
   1526 
   1527  // ===============================================================
   1528  // Branch functions
   1529 
   1530  inline void branch8(Condition cond, const Address& lhs, Imm32 rhs,
   1531                      Label* label) PER_SHARED_ARCH;
   1532 
   1533  // Compares the byte in |lhs| against |rhs| using a 8-bit comparison on
   1534  // x86/x64 or a 32-bit comparison (all other platforms). The caller should
   1535  // ensure |rhs| is a zero- resp. sign-extended byte value for cross-platform
   1536  // compatible code.
   1537  inline void branch8(Condition cond, const BaseIndex& lhs, Register rhs,
   1538                      Label* label) PER_SHARED_ARCH;
   1539 
   1540  inline void branch16(Condition cond, const Address& lhs, Imm32 rhs,
   1541                       Label* label) PER_SHARED_ARCH;
   1542 
   1543  inline void branch32(Condition cond, Register lhs, Register rhs,
   1544                       Label* label) PER_SHARED_ARCH;
   1545  inline void branch32(Condition cond, Register lhs, Imm32 rhs,
   1546                       Label* label) PER_SHARED_ARCH;
   1547 
   1548  inline void branch32(Condition cond, Register lhs, const Address& rhs,
   1549                       Label* label) DEFINED_ON(arm64);
   1550 
   1551  inline void branch32(Condition cond, const Address& lhs, Register rhs,
   1552                       Label* label) PER_SHARED_ARCH;
   1553  inline void branch32(Condition cond, const Address& lhs, Imm32 rhs,
   1554                       Label* label) PER_SHARED_ARCH;
   1555 
   1556  inline void branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs,
   1557                       Label* label) PER_ARCH;
   1558  inline void branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs,
   1559                       Label* label) PER_ARCH;
   1560 
   1561  inline void branch32(Condition cond, const BaseIndex& lhs, Register rhs,
   1562                       Label* label) DEFINED_ON(arm, x86_shared);
   1563  inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
   1564                       Label* label) PER_SHARED_ARCH;
   1565 
   1566  inline void branch32(Condition cond, const Operand& lhs, Register rhs,
   1567                       Label* label) DEFINED_ON(x86_shared);
   1568  inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs,
   1569                       Label* label) DEFINED_ON(x86_shared);
   1570 
   1571  inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs,
   1572                       Label* label) PER_ARCH;
   1573 
   1574  // The supported condition are Equal, NotEqual, LessThan(orEqual),
   1575  // GreaterThan(orEqual), Below(orEqual) and Above(orEqual). When a fail label
   1576  // is not defined it will fall through to next instruction, else jump to the
   1577  // fail label.
   1578  inline void branch64(Condition cond, Register64 lhs, Imm64 val,
   1579                       Label* success, Label* fail = nullptr) PER_ARCH;
   1580  inline void branch64(Condition cond, Register64 lhs, Register64 rhs,
   1581                       Label* success, Label* fail = nullptr) PER_ARCH;
   1582  inline void branch64(Condition cond, const Address& lhs, Imm64 val,
   1583                       Label* success, Label* fail = nullptr) PER_ARCH;
   1584  inline void branch64(Condition cond, const Address& lhs, Register64 rhs,
   1585                       Label* success, Label* fail = nullptr) PER_ARCH;
   1586 
  // Compare the value at |lhs| with the value at |rhs|.  The scratch
  // register *must not* be the base of |lhs| or |rhs|.
  // Only the NotEqual and Equal conditions are allowed.
  inline void branch64(Condition cond, const Address& lhs, const Address& rhs,
                       Register scratch, Label* label) PER_ARCH;

  // Pointer-width comparison branches: jump to |label| when |cond| holds for
  // |lhs| compared against |rhs|.
  inline void branchPtr(Condition cond, Register lhs, Register rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, Register lhs, Imm32 rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, Register lhs, ImmPtr rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, Register lhs, ImmWord rhs,
                        Label* label) PER_SHARED_ARCH;

  inline void branchPtr(Condition cond, const Address& lhs, Register rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
                        Label* label) PER_SHARED_ARCH;

  inline void branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs,
                        Label* label) PER_SHARED_ARCH;
  inline void branchPtr(Condition cond, const BaseIndex& lhs, Register rhs,
                        Label* label) PER_SHARED_ARCH;

  inline void branchPtr(Condition cond, const AbsoluteAddress& lhs,
                        Register rhs, Label* label) PER_ARCH;
  inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs,
                        Label* label) PER_ARCH;

  inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs,
                        Label* label) PER_ARCH;

  // Given a pointer to a GC Cell, retrieve the StoreBuffer pointer from its
  // chunk header, or nullptr if it is in the tenured heap.
  void loadStoreBuffer(Register ptr, Register buffer) PER_ARCH;

  // GC nursery checks; |temp| is clobbered as a scratch register.
  void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
                               Label* label) PER_ARCH;
  void branchPtrInNurseryChunk(Condition cond, const Address& address,
                               Register temp, Label* label) DEFINED_ON(x86);
  void branchValueIsNurseryCell(Condition cond, const Address& address,
                                Register temp, Label* label) PER_ARCH;
  void branchValueIsNurseryCell(Condition cond, ValueOperand value,
                                Register temp, Label* label) PER_ARCH;

  // This function compares a Value (lhs) which is having a private pointer
  // boxed inside a js::Value, with a raw pointer (rhs).
  inline void branchPrivatePtr(Condition cond, const Address& lhs, Register rhs,
                               Label* label) PER_ARCH;
   1643 
  // Floating-point comparison branch: jump to |label| when |cond| holds.
  inline void branchFloat(DoubleCondition cond, FloatRegister lhs,
                          FloatRegister rhs, Label* label) PER_SHARED_ARCH;

  // Truncate a double/float32 to int32 and when it doesn't fit an int32 it will
  // jump to the failure label. This particular variant is allowed to return the
  // value modulo 2**32, which isn't implemented on all architectures.
  inline void branchTruncateFloat32MaybeModUint32(FloatRegister src,
                                                  Register dest,
                                                  Label* fail) PER_ARCH;
  inline void branchTruncateDoubleMaybeModUint32(FloatRegister src,
                                                 Register dest,
                                                 Label* fail) PER_ARCH;

  // Truncate a double/float32 to intptr and when it doesn't fit jump to the
  // failure label.
  inline void branchTruncateFloat32ToPtr(FloatRegister src, Register dest,
                                         Label* fail) DEFINED_ON(x86, x64);
  inline void branchTruncateDoubleToPtr(FloatRegister src, Register dest,
                                        Label* fail) DEFINED_ON(x86, x64);

  // Truncate a double/float32 to int32 and when it doesn't fit jump to the
  // failure label.
  inline void branchTruncateFloat32ToInt32(FloatRegister src, Register dest,
                                           Label* fail) PER_ARCH;
  inline void branchTruncateDoubleToInt32(FloatRegister src, Register dest,
                                          Label* fail) PER_ARCH;

  inline void branchDouble(DoubleCondition cond, FloatRegister lhs,
                           FloatRegister rhs, Label* label) PER_SHARED_ARCH;

  // Branch to |fail| when the float at |src| is outside the given 64-bit
  // integer range; |temp| is a scratch register.
  inline void branchDoubleNotInInt64Range(Address src, Register temp,
                                          Label* fail);
  inline void branchDoubleNotInUInt64Range(Address src, Register temp,
                                           Label* fail);
  inline void branchFloat32NotInInt64Range(Address src, Register temp,
                                           Label* fail);
  inline void branchFloat32NotInUInt64Range(Address src, Register temp,
                                            Label* fail);

  // Branch if the (un)signed int64 is outside the range of a signed intptr.
  inline void branchInt64NotInPtrRange(Register64 src, Label* label) PER_ARCH;
  inline void branchUInt64NotInPtrRange(Register64 src, Label* label) PER_ARCH;
   1682 
  // Fused arithmetic-and-branch: perform the operation on |dest|, then jump
  // to |label| when |cond| holds on the result (e.g. Overflow, Zero).

  template <typename T>
  inline void branchAdd32(Condition cond, T src, Register dest,
                          Label* label) PER_SHARED_ARCH;
  template <typename T>
  inline void branchSub32(Condition cond, T src, Register dest,
                          Label* label) PER_SHARED_ARCH;
  template <typename T>
  inline void branchMul32(Condition cond, T src, Register dest,
                          Label* label) PER_SHARED_ARCH;
  template <typename T>
  inline void branchRshift32(Condition cond, T src, Register dest,
                             Label* label) PER_SHARED_ARCH;

  inline void branchNeg32(Condition cond, Register reg,
                          Label* label) PER_SHARED_ARCH;

  inline void branchAdd64(Condition cond, Imm64 imm, Register64 dest,
                          Label* label) DEFINED_ON(x86, arm, wasm32);

  // Pointer-width variants of the fused arithmetic-and-branch operations.
  template <typename T>
  inline void branchAddPtr(Condition cond, T src, Register dest,
                           Label* label) PER_SHARED_ARCH;

  template <typename T>
  inline void branchSubPtr(Condition cond, T src, Register dest,
                           Label* label) PER_SHARED_ARCH;

  inline void branchMulPtr(Condition cond, Register src, Register dest,
                           Label* label) PER_SHARED_ARCH;

  inline void branchNegPtr(Condition cond, Register reg,
                           Label* label) PER_SHARED_ARCH;
   1719 
   1720  inline void decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
   1721                           Label* label) PER_SHARED_ARCH;
   1722 
   1723  inline void branchTest32(Condition cond, Register lhs, Register rhs,
   1724                           Label* label) PER_SHARED_ARCH;
   1725  inline void branchTest32(Condition cond, Register lhs, Imm32 rhs,
   1726                           Label* label) PER_SHARED_ARCH;
   1727  inline void branchTest32(Condition cond, const Address& lhs, Imm32 rhh,
   1728                           Label* label) PER_SHARED_ARCH;
   1729  inline void branchTest32(Condition cond, const AbsoluteAddress& lhs,
   1730                           Imm32 rhs, Label* label) PER_ARCH;
   1731 
   1732  inline void branchTestPtr(Condition cond, Register lhs, Register rhs,
   1733                            Label* label) PER_SHARED_ARCH;
   1734  inline void branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
   1735                            Label* label) PER_SHARED_ARCH;
   1736  inline void branchTestPtr(Condition cond, Register lhs, ImmWord rhs,
   1737                            Label* label) PER_ARCH;
   1738  inline void branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs,
   1739                            Label* label) PER_SHARED_ARCH;
   1740 
  // 64-bit bit-test branches.
  // When a fail label is not defined it will fall through to next instruction,
  // else jump to the fail label.
  //
  // On x86 if |lhs == rhs|, |temp| is used to generate a single branch
  // instruction. Otherwise |temp| is unused and can be |InvalidReg|.
  inline void branchTest64(Condition cond, Register64 lhs, Register64 rhs,
                           Register temp, Label* success,
                           Label* fail = nullptr) PER_ARCH;
  inline void branchTest64(Condition cond, Register64 lhs, Register64 rhs,
                           Label* success, Label* fail = nullptr);
  inline void branchTest64(Condition cond, Register64 lhs, Imm64 rhs,
                           Label* success, Label* fail = nullptr) PER_ARCH;
   1753 
  // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
  inline void branchIfFalseBool(Register reg, Label* label);

  // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
  inline void branchIfTrueBool(Register reg, Label* label);

  inline void branchIfNotNullOrUndefined(ValueOperand val, Label* label);

  // String representation checks; |str| / |string| hold a JSString pointer.
  inline void branchIfRope(Register str, Label* label);
  inline void branchIfNotRope(Register str, Label* label);

  inline void branchLatin1String(Register string, Label* label);
  inline void branchTwoByteString(Register string, Label* label);

  // BigInt sign/zero checks; |bigInt| holds a BigInt pointer.
  inline void branchIfBigIntIsNegative(Register bigInt, Label* label);
  inline void branchIfBigIntIsNonNegative(Register bigInt, Label* label);
  inline void branchIfBigIntIsZero(Register bigInt, Label* label);
  inline void branchIfBigIntIsNonZero(Register bigInt, Label* label);

  // Test |fun|'s FunctionFlags against |flags| and branch on |cond|.
  inline void branchTestFunctionFlags(Register fun, uint32_t flags,
                                      Condition cond, Label* label);

  inline void branchIfNotFunctionIsNonBuiltinCtor(Register fun,
                                                  Register scratch,
                                                  Label* label);

  inline void branchIfFunctionHasNoJitEntry(Register fun, Label* label);
  inline void branchIfFunctionHasJitEntry(Register fun, Label* label);

  inline void branchIfScriptHasJitScript(Register script, Label* label);
  inline void branchIfScriptHasNoJitScript(Register script, Label* label);
  inline void loadJitScript(Register script, Register dest);

  // Loads the function's argument count.
  inline void loadFunctionArgCount(Register func, Register output);

  // Loads the function length. This handles interpreted, native, and bound
  // functions. The caller is responsible for checking that INTERPRETED_LAZY and
  // RESOLVED_LENGTH flags are not set.
  void loadFunctionLength(Register func, Register funFlagsAndArgCount,
                          Register output, Label* slowPath);

  // Loads the function name. This handles interpreted, native, and bound
  // functions.
  void loadFunctionName(Register func, Register output, ImmGCPtr emptyString,
                        Label* slowPath);

  // Debug-assert helper; presumably a no-op in non-debug builds — confirm in
  // the definition.
  void assertFunctionIsExtended(Register func);
   1802 
  // Branch on the FunctionKind stored in |fun|'s flags; |scratch| is
  // clobbered.
  inline void branchFunctionKind(Condition cond,
                                 FunctionFlags::FunctionKind kind, Register fun,
                                 Register scratch, Label* label);

  inline void branchIfObjectEmulatesUndefined(Register objReg, Register scratch,
                                              Label* slowCheck, Label* label);

  // For all methods below: spectreRegToZero is a register that will be zeroed
  // on speculatively executed code paths (when the branch should be taken but
  // branch prediction speculates it isn't). Usually this will be the object
  // register but the caller may pass a different register.

  inline void branchTestObjClass(Condition cond, Register obj,
                                 const JSClass* clasp, Register scratch,
                                 Register spectreRegToZero, Label* label);
  inline void branchTestObjClassNoSpectreMitigations(Condition cond,
                                                     Register obj,
                                                     const JSClass* clasp,
                                                     Register scratch,
                                                     Label* label);

  inline void branchTestObjClass(Condition cond, Register obj,
                                 const Address& clasp, Register scratch,
                                 Register spectreRegToZero, Label* label);
  inline void branchTestObjClassNoSpectreMitigations(Condition cond,
                                                     Register obj,
                                                     const Address& clasp,
                                                     Register scratch,
                                                     Label* label);

  inline void branchTestObjClass(Condition cond, Register obj, Register clasp,
                                 Register scratch, Register spectreRegToZero,
                                 Label* label);

  inline void branchTestObjShape(Condition cond, Register obj,
                                 const Shape* shape, Register scratch,
                                 Register spectreRegToZero, Label* label);
  inline void branchTestObjShapeNoSpectreMitigations(Condition cond,
                                                     Register obj,
                                                     const Shape* shape,
                                                     Label* label);
   1844 
   1845 private:
   1846  void branchTestObjShapeListImpl(Register obj, Register shapeElements,
   1847                                  size_t itemSize, Register shapeScratch,
   1848                                  Register endScratch, Register spectreScratch,
   1849                                  Label* fail);
   1850 
   1851 public:
   1852  void branchTestObjShapeList(Register obj, Register shapeElements,
   1853                              Register shapeScratch, Register endScratch,
   1854                              Register spectreScratch, Label* fail);
   1855 
   1856  void branchTestObjShapeListSetOffset(Register obj, Register shapeElements,
   1857                                       Register offset, Register shapeScratch,
   1858                                       Register endScratch,
   1859                                       Register spectreScratch, Label* fail);
   1860 
   1861  inline void branchTestClassIsFunction(Condition cond, Register clasp,
   1862                                        Label* label);
   1863  inline void branchTestObjIsFunction(Condition cond, Register obj,
   1864                                      Register scratch,
   1865                                      Register spectreRegToZero, Label* label);
   1866  inline void branchTestObjIsFunctionNoSpectreMitigations(Condition cond,
   1867                                                          Register obj,
   1868                                                          Register scratch,
   1869                                                          Label* label);
   1870 
   1871  inline void branchTestObjShape(Condition cond, Register obj, Register shape,
   1872                                 Register scratch, Register spectreRegToZero,
   1873                                 Label* label);
   1874  inline void branchTestObjShapeNoSpectreMitigations(Condition cond,
   1875                                                     Register obj,
   1876                                                     Register shape,
   1877                                                     Label* label);
   1878 
   1879  // TODO: audit/fix callers to be Spectre safe.
   1880  inline void branchTestObjShapeUnsafe(Condition cond, Register obj,
   1881                                       Register shape, Label* label);
   1882 
  // Branch on the object's compartment, given either as a memory location or
  // as a static pointer. Clobbers |scratch|.
  void branchTestObjCompartment(Condition cond, Register obj,
                                const Address& compartment, Register scratch,
                                Label* label);
  void branchTestObjCompartment(Condition cond, Register obj,
                                const JS::Compartment* compartment,
                                Register scratch, Label* label);

  // Branches to |label| when |obj| is not a native object.
  void branchIfNonNativeObj(Register obj, Register scratch, Label* label);

  // Branches to |label| when |obj| is not extensible.
  void branchIfObjectNotExtensible(Register obj, Register scratch,
                                   Label* label);

  void branchTestObjectNeedsProxyResultValidation(Condition condition,
                                                  Register obj,
                                                  Register scratch,
                                                  Label* label);

  // Proxy tests. (NOTE(review): the |proxy| flag presumably selects
  // branch-if-proxy (true) vs branch-if-not-proxy (false) — confirm against
  // the inline definitions.)
  inline void branchTestClassIsProxy(bool proxy, Register clasp, Label* label);

  inline void branchTestObjectIsProxy(bool proxy, Register object,
                                      Register scratch, Label* label);

  inline void branchTestProxyHandlerFamily(Condition cond, Register proxy,
                                           Register scratch,
                                           const void* handlerp, Label* label);

  // GC barrier tests: branch when an incremental barrier is needed, either
  // for the current zone or for any zone.
  inline void branchTestNeedsIncrementalBarrier(Condition cond, Label* label);
  inline void branchTestNeedsIncrementalBarrierAnyZone(Condition cond,
                                                       Label* label,
                                                       Register scratch);
   1913 
  // Perform a type-test on a tag of a Value (32bits boxing), or the tagged
  // value (64bits boxing).
  inline void branchTestUndefined(Condition cond, Register tag,
                                  Label* label) PER_SHARED_ARCH;
  inline void branchTestInt32(Condition cond, Register tag,
                              Label* label) PER_SHARED_ARCH;
  inline void branchTestDouble(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestNumber(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestBoolean(Condition cond, Register tag,
                                Label* label) PER_SHARED_ARCH;
  inline void branchTestString(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestSymbol(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestBigInt(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestNull(Condition cond, Register tag,
                             Label* label) PER_SHARED_ARCH;
  inline void branchTestObject(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestPrimitive(Condition cond, Register tag,
                                  Label* label) PER_SHARED_ARCH;
  inline void branchTestMagic(Condition cond, Register tag,
                              Label* label) PER_SHARED_ARCH;
  // Dispatches to one of the typed branchTest* methods above based on the
  // runtime-chosen |type|.
  void branchTestType(Condition cond, Register tag, JSValueType type,
                      Label* label);
   1942 
  // Perform a type-test on a Value, addressed by Address or BaseIndex, or
  // loaded into ValueOperand.
  // BaseIndex and ValueOperand variants clobber the ScratchReg on x64.
  // All Variants clobber the ScratchReg on arm64.
  inline void branchTestUndefined(Condition cond, const Address& address,
                                  Label* label) PER_SHARED_ARCH;
  inline void branchTestUndefined(Condition cond, const BaseIndex& address,
                                  Label* label) PER_SHARED_ARCH;
  inline void branchTestUndefined(Condition cond, const ValueOperand& value,
                                  Label* label) PER_SHARED_ARCH;

  inline void branchTestInt32(Condition cond, const Address& address,
                              Label* label) PER_SHARED_ARCH;
  inline void branchTestInt32(Condition cond, const BaseIndex& address,
                              Label* label) PER_SHARED_ARCH;
  inline void branchTestInt32(Condition cond, const ValueOperand& value,
                              Label* label) PER_SHARED_ARCH;

  inline void branchTestDouble(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestDouble(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestDouble(Condition cond, const ValueOperand& value,
                               Label* label) PER_SHARED_ARCH;

  inline void branchTestNumber(Condition cond, const ValueOperand& value,
                               Label* label) PER_SHARED_ARCH;

  inline void branchTestBoolean(Condition cond, const Address& address,
                                Label* label) PER_SHARED_ARCH;
  inline void branchTestBoolean(Condition cond, const BaseIndex& address,
                                Label* label) PER_SHARED_ARCH;
  inline void branchTestBoolean(Condition cond, const ValueOperand& value,
                                Label* label) PER_SHARED_ARCH;

  inline void branchTestString(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestString(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestString(Condition cond, const ValueOperand& value,
                               Label* label) PER_SHARED_ARCH;

  inline void branchTestSymbol(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestSymbol(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestSymbol(Condition cond, const ValueOperand& value,
                               Label* label) PER_SHARED_ARCH;

  inline void branchTestBigInt(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestBigInt(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestBigInt(Condition cond, const ValueOperand& value,
                               Label* label) PER_SHARED_ARCH;

  inline void branchTestNull(Condition cond, const Address& address,
                             Label* label) PER_SHARED_ARCH;
  inline void branchTestNull(Condition cond, const BaseIndex& address,
                             Label* label) PER_SHARED_ARCH;
  inline void branchTestNull(Condition cond, const ValueOperand& value,
                             Label* label) PER_SHARED_ARCH;

  // Clobbers the ScratchReg on x64.
  inline void branchTestObject(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestObject(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
  inline void branchTestObject(Condition cond, const ValueOperand& value,
                               Label* label) PER_SHARED_ARCH;

  inline void branchTestGCThing(Condition cond, const Address& address,
                                Label* label) PER_SHARED_ARCH;
  inline void branchTestGCThing(Condition cond, const BaseIndex& address,
                                Label* label) PER_SHARED_ARCH;
  inline void branchTestGCThing(Condition cond, const ValueOperand& value,
                                Label* label) PER_SHARED_ARCH;

  inline void branchTestPrimitive(Condition cond, const ValueOperand& value,
                                  Label* label) PER_SHARED_ARCH;

  inline void branchTestMagic(Condition cond, const Address& address,
                              Label* label) PER_SHARED_ARCH;
  inline void branchTestMagic(Condition cond, const BaseIndex& address,
                              Label* label) PER_SHARED_ARCH;
  inline void branchTestMagic(Condition cond, const ValueOperand& value,
                              Label* label) PER_SHARED_ARCH;

  // Magic tests that additionally match a specific JSWhyMagic payload.
  inline void branchTestMagic(Condition cond, const Address& valaddr,
                              JSWhyMagic why, Label* label) PER_ARCH;

  inline void branchTestMagicValue(Condition cond, const ValueOperand& val,
                                   JSWhyMagic why, Label* label);
   2036 
  // Branch on full-Value equality against a constant Value.
  void branchTestValue(Condition cond, const ValueOperand& lhs,
                       const Value& rhs, Label* label) PER_ARCH;
  // Branch on whether |val| is a NaN-valued double Value. Clobbers |temp|.
  // (NOTE(review): whether this matches only the canonical NaN bit pattern
  // is decided in the per-arch definitions — confirm.)
  void branchTestNaNValue(Condition cond, const ValueOperand& val,
                          Register temp, Label* label) PER_ARCH;

  // Branch on Value equality where the left-hand side is in memory (|T| is an
  // Address or BaseIndex).
  template <typename T>
  inline void branchTestValue(Condition cond, const T& lhs,
                              const ValueOperand& rhs, Label* label) PER_ARCH;

  // Checks if given Value is evaluated to true or false in a condition.
  // The type of the value should match the type of the method.
  inline void branchTestInt32Truthy(bool truthy, const ValueOperand& value,
                                    Label* label) PER_SHARED_ARCH;
  inline void branchTestDoubleTruthy(bool truthy, FloatRegister reg,
                                     Label* label) PER_SHARED_ARCH;
  inline void branchTestBooleanTruthy(bool truthy, const ValueOperand& value,
                                      Label* label) PER_ARCH;
  inline void branchTestStringTruthy(bool truthy, const ValueOperand& value,
                                     Label* label) PER_SHARED_ARCH;
  inline void branchTestBigIntTruthy(bool truthy, const ValueOperand& value,
                                     Label* label) PER_SHARED_ARCH;
   2058 
  // Create an unconditional branch to the address given as argument.
  inline void branchToComputedAddress(const BaseIndex& address) PER_ARCH;

  // Subtract a constant in the range 1 .. 127 inclusive from the value stored
  // at `address`, write the result back to `address`, and jump to `label` if
  // the updated value is negative.  The subtract is a 32-bit operation even
  // though the value to be subtracted must fit in 7 bits.
  //
  // The next two methods form a patchable pair: the first emits the code with
  // a placeholder immediate and returns its offset, the second patches in the
  // real value later.
  CodeOffset sub32FromMemAndBranchIfNegativeWithPatch(
      Address address, Label* label) PER_SHARED_ARCH;

  // Patch in the value to be subtracted.  Must be 1 .. 127 inclusive.
  void patchSub32FromMemAndBranchIfNegative(CodeOffset offset,
                                            Imm32 imm) PER_SHARED_ARCH;
   2072 
 private:
  // Per-architecture implementation helpers backing the public branch methods
  // above. Each is only defined on the architectures listed in DEFINED_ON;
  // other architectures provide the public entry points directly.
  template <typename T, typename S>
  inline void branchPtrImpl(Condition cond, const T& lhs, const S& rhs,
                            Label* label) DEFINED_ON(x86_shared);

  void branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label)
      DEFINED_ON(x86);
  template <typename T>
  void branchValueIsNurseryCellImpl(Condition cond, const T& value,
                                    Register temp, Label* label)
      DEFINED_ON(arm64, x64, mips64, loong64, riscv64);

  // Shared type-test implementations; |T| is a tag register, Address,
  // BaseIndex, or ValueOperand depending on the caller.
  template <typename T>
  inline void branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestInt32Impl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestDoubleImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestNumberImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestBooleanImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestStringImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestSymbolImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestBigIntImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestNullImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestObjectImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestGCThingImpl(Condition cond, const T& t,
                                    Label* label) PER_SHARED_ARCH;
  template <typename T>
  inline void branchTestPrimitiveImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
  template <typename T>
  inline void branchTestMagicImpl(Condition cond, const T& t, Label* label)
      DEFINED_ON(arm, arm64, x86_shared);
   2124 
 public:
  // Type tests that materialize the outcome of the test into |dest| instead
  // of branching. (NOTE(review): the exact boolean encoding of |dest| is in
  // the per-arch definitions — confirm.)
  template <typename T>
  inline void testNumberSet(Condition cond, const T& src,
                            Register dest) PER_SHARED_ARCH;
  template <typename T>
  inline void testBooleanSet(Condition cond, const T& src,
                             Register dest) PER_SHARED_ARCH;
  template <typename T>
  inline void testStringSet(Condition cond, const T& src,
                            Register dest) PER_SHARED_ARCH;
  template <typename T>
  inline void testSymbolSet(Condition cond, const T& src,
                            Register dest) PER_SHARED_ARCH;
  template <typename T>
  inline void testBigIntSet(Condition cond, const T& src,
                            Register dest) PER_SHARED_ARCH;
   2141 
 public:
  // The fallibleUnbox* methods below combine a Value type check with an unbox.
  // Especially on 64-bit platforms this can be implemented more efficiently
  // than a separate branch + unbox.
  //
  // |src| and |dest| can be the same register, but |dest| may hold garbage on
  // failure.
  inline void fallibleUnboxPtr(const ValueOperand& src, Register dest,
                               JSValueType type, Label* fail) PER_ARCH;
  inline void fallibleUnboxPtr(const Address& src, Register dest,
                               JSValueType type, Label* fail) PER_ARCH;
  inline void fallibleUnboxPtr(const BaseIndex& src, Register dest,
                               JSValueType type, Label* fail) PER_ARCH;
  // Typed convenience variants; |fail| is taken when |src| does not hold a
  // Value of the expected type. |T| is a ValueOperand, Address, or BaseIndex.
  template <typename T>
  inline void fallibleUnboxInt32(const T& src, Register dest, Label* fail);
  template <typename T>
  inline void fallibleUnboxBoolean(const T& src, Register dest, Label* fail);
  template <typename T>
  inline void fallibleUnboxObject(const T& src, Register dest, Label* fail);
  template <typename T>
  inline void fallibleUnboxString(const T& src, Register dest, Label* fail);
  template <typename T>
  inline void fallibleUnboxSymbol(const T& src, Register dest, Label* fail);
  template <typename T>
  inline void fallibleUnboxBigInt(const T& src, Register dest, Label* fail);
   2167 
  // Conditional moves: when the 32-bit comparison of |lhs| and |rhs|
  // satisfies |cond|, move |src| into |dest|; otherwise |dest| is left
  // unchanged.
  inline void cmp32Move32(Condition cond, Register lhs, Imm32 rhs, Register src,
                          Register dest) PER_SHARED_ARCH;

  inline void cmp32Move32(Condition cond, Register lhs, Register rhs,
                          Register src, Register dest) PER_SHARED_ARCH;

  inline void cmp32Move32(Condition cond, Register lhs, const Address& rhs,
                          Register src, Register dest) PER_SHARED_ARCH;

  // Pointer-width analogues of cmp32Move32.
  inline void cmpPtrMovePtr(Condition cond, Register lhs, Imm32 rhs,
                            Register src, Register dest) PER_ARCH;

  inline void cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
                            Register src, Register dest) PER_ARCH;

  inline void cmpPtrMovePtr(Condition cond, Register lhs, const Address& rhs,
                            Register src, Register dest) PER_ARCH;

  // Conditional loads: like the moves above, but |src| is a memory location
  // loaded into |dest| when the comparison satisfies |cond|.
  inline void cmp32Load32(Condition cond, Register lhs, const Address& rhs,
                          const Address& src, Register dest) PER_SHARED_ARCH;

  inline void cmp32Load32(Condition cond, Register lhs, Register rhs,
                          const Address& src, Register dest) PER_SHARED_ARCH;

  inline void cmp32Load32(Condition cond, Register lhs, Imm32 rhs,
                          const Address& src, Register dest) PER_SHARED_ARCH;

  inline void cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
                           const Address& src, Register dest) PER_ARCH;

  inline void cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
                           Register src, Register dest) PER_ARCH;

  // test32-based variants: condition is derived from a bit test with |mask|
  // rather than a comparison.
  inline void test32LoadPtr(Condition cond, const Address& addr, Imm32 mask,
                            const Address& src, Register dest) PER_ARCH;

  inline void test32MovePtr(Condition cond, Register operand, Imm32 mask,
                            Register src, Register dest) PER_ARCH;

  inline void test32MovePtr(Condition cond, const Address& addr, Imm32 mask,
                            Register src, Register dest) PER_ARCH;

  // Conditional move for Spectre mitigations.
  inline void spectreMovePtr(Condition cond, Register src,
                             Register dest) PER_ARCH;

  // Zeroes dest if the condition is true.
  inline void spectreZeroRegister(Condition cond, Register scratch,
                                  Register dest) PER_SHARED_ARCH;

  // Performs a bounds check and zeroes the index register if out-of-bounds
  // (to mitigate Spectre).
 private:
  inline void spectreBoundsCheck32(Register index, const Operand& length,
                                   Register maybeScratch, Label* failure)
      DEFINED_ON(x86);

 public:
  // |maybeScratch| — NOTE(review): the name suggests it may be allowed to be
  // InvalidReg on some architectures; confirm in the per-arch definitions.
  inline void spectreBoundsCheck32(Register index, Register length,
                                   Register maybeScratch,
                                   Label* failure) PER_ARCH;
  inline void spectreBoundsCheck32(Register index, const Address& length,
                                   Register maybeScratch,
                                   Label* failure) PER_ARCH;

  inline void spectreBoundsCheckPtr(Register index, Register length,
                                    Register maybeScratch,
                                    Label* failure) PER_ARCH;
  inline void spectreBoundsCheckPtr(Register index, const Address& length,
                                    Register maybeScratch,
                                    Label* failure) PER_ARCH;
   2239 
  // ========================================================================
  // Canonicalization primitives.
  //
  // NOTE(review): presumably these normalize NaN payloads to the engine's
  // canonical NaN bit pattern — definitions are out of view; confirm.
  inline void canonicalizeDouble(FloatRegister reg);

  inline void canonicalizeFloat(FloatRegister reg);
   2245 
 public:
  // ========================================================================
  // Memory access primitives.
  //
  // Each store returns a FaultingCodeOffset identifying the emitted memory
  // access. (NOTE(review): presumably used for wasm trap/fault handling —
  // confirm against FaultingCodeOffset's definition.)
  inline FaultingCodeOffset storeDouble(FloatRegister src,
                                        const Address& dest) PER_SHARED_ARCH;
  inline FaultingCodeOffset storeDouble(FloatRegister src,
                                        const BaseIndex& dest) PER_SHARED_ARCH;
  inline FaultingCodeOffset storeDouble(FloatRegister src, const Operand& dest)
      DEFINED_ON(x86_shared);

  template <class T>
  inline void boxDouble(FloatRegister src, const T& dest);

  using MacroAssemblerSpecific::boxDouble;

  inline FaultingCodeOffset storeFloat32(FloatRegister src,
                                         const Address& dest) PER_SHARED_ARCH;
  inline FaultingCodeOffset storeFloat32(FloatRegister src,
                                         const BaseIndex& dest) PER_SHARED_ARCH;
  inline FaultingCodeOffset storeFloat32(FloatRegister src, const Operand& dest)
      DEFINED_ON(x86_shared);

  // Float16 stores additionally require a GPR |scratch|.
  inline FaultingCodeOffset storeFloat16(FloatRegister src, const Address& dest,
                                         Register scratch) PER_SHARED_ARCH;
  inline FaultingCodeOffset storeFloat16(FloatRegister src,
                                         const BaseIndex& dest,
                                         Register scratch) PER_SHARED_ARCH;

  // Stores a Value whose payload is in |value| and whose static type is
  // |valueType| to |dest| (Address or BaseIndex).
  template <typename T>
  void storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                         const T& dest) PER_ARCH;

  // Emits a memory barrier of the requested kind.
  inline void memoryBarrier(MemoryBarrier barrier) PER_SHARED_ARCH;
   2279 
 public:
  // ========================================================================
  // Wasm SIMD
  //
  // Naming is "operationSimd128" when operate on the whole vector, otherwise
  // it's "operation<Type><Size>x<Lanes>".
  //
  // For microarchitectural reasons we can in principle get a performance win by
  // using int or float specific instructions in the operationSimd128 case when
  // we know that subsequent operations on the result are int or float oriented.
  // In practice, we don't care about that yet.
  //
  // The order of operations here follows those in the SIMD overview document,
  // https://github.com/WebAssembly/simd/blob/master/proposals/simd/SIMD.md.
  //
  // Since we must target Intel SSE indefinitely and SSE is one-address or
  // two-address, the x86 porting interfaces are nearly all one-address or
  // two-address.  Likewise there are two-address ARM64 interfaces to support
  // the baseline compiler.  But there are also three-address ARM64 interfaces
  // as the ARM64 Ion back-end can use those.  In the future, they may support
  // AVX2 or similar for x86.
  //
  // Conventions for argument order and naming and semantics:
  //  - Condition codes come first.
  //  - Other immediates (masks, shift counts) come next.
  //  - Operands come next:
  //    - For a binary two-address operator where the left-hand-side has the
  //      same type as the result, one register parameter is normally named
  //      `lhsDest` and is both the left-hand side and destination; the other
  //      parameter is named `rhs` and is the right-hand side.  `rhs` comes
  //      first, `lhsDest` second.  `rhs` and `lhsDest` may be the same register
  //      (if rhs is a register).
  //    - For a binary three-address operator the order is `lhs`, `rhs`, `dest`,
  //      and generally these registers may be the same.
  //    - For a unary operator, the input is named `src` and the output is named
  //      `dest`.  `src` comes first, `dest` second.  `src` and `dest` may be
  //      the same register (if `src` is a register).
  //  - Temp registers follow operands and are named `temp` if there's only one,
  //    otherwise `temp1`, `temp2`, etc regardless of type.  GPR temps precede
  //    FPU temps.  If there are several temps then they must be distinct
  //    registers, and they must be distinct from the operand registers unless
  //    noted.

  // Moves

  inline void moveSimd128(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Constants

  inline void loadConstantSimd128(const SimdConstant& v, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Splat: broadcast a scalar — from a GPR, from an FPR, or (on arm64) from
  // lane |srcLane| of |src| — to every lane of |dest|.

  inline void splatX16(Register src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void splatX16(uint32_t srcLane, FloatRegister src, FloatRegister dest)
      DEFINED_ON(arm64);

  inline void splatX8(Register src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void splatX8(uint32_t srcLane, FloatRegister src, FloatRegister dest)
      DEFINED_ON(arm64);

  inline void splatX4(Register src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void splatX4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void splatX2(Register64 src, FloatRegister dest)
      DEFINED_ON(x86, x64, arm64);

  inline void splatX2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2358 
  // Extract lane as scalar.  Float extraction does not canonicalize the value.
  // (NOTE(review): the "unsigned" variants presumably zero-extend into the
  // GPR while the plain Int8/Int16 variants sign-extend — confirm in the
  // per-arch definitions.)

  inline void extractLaneInt8x16(uint32_t lane, FloatRegister src,
                                 Register dest) DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtractLaneInt8x16(uint32_t lane, FloatRegister src,
                                         Register dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extractLaneInt16x8(uint32_t lane, FloatRegister src,
                                 Register dest) DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtractLaneInt16x8(uint32_t lane, FloatRegister src,
                                         Register dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extractLaneInt32x4(uint32_t lane, FloatRegister src,
                                 Register dest) DEFINED_ON(x86_shared, arm64);

  // 64-bit lanes need a Register64 destination, hence the x86/x64-specific
  // DEFINED_ON set.
  inline void extractLaneInt64x2(uint32_t lane, FloatRegister src,
                                 Register64 dest) DEFINED_ON(x86, x64, arm64);

  inline void extractLaneFloat32x4(uint32_t lane, FloatRegister src,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extractLaneFloat64x2(uint32_t lane, FloatRegister src,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2388 
  // Replace lane value.
  //
  // Each type has two forms: a three-operand (lhs, rhs, dest) form declared
  // only for x86_shared (x86/x64 for 64-bit lanes), and a two-address
  // (rhs, lhsDest) form where the result overwrites the left operand.

  inline void replaceLaneInt8x16(unsigned lane, FloatRegister lhs, Register rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  inline void replaceLaneInt8x16(unsigned lane, Register rhs,
                                 FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  inline void replaceLaneInt16x8(unsigned lane, FloatRegister lhs, Register rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  inline void replaceLaneInt16x8(unsigned lane, Register rhs,
                                 FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  inline void replaceLaneInt32x4(unsigned lane, FloatRegister lhs, Register rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  inline void replaceLaneInt32x4(unsigned lane, Register rhs,
                                 FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  // 64-bit lane replacement takes a Register64 scalar.
  inline void replaceLaneInt64x2(unsigned lane, FloatRegister lhs,
                                 Register64 rhs, FloatRegister dest)
      DEFINED_ON(x86, x64);

  inline void replaceLaneInt64x2(unsigned lane, Register64 rhs,
                                 FloatRegister lhsDest)
      DEFINED_ON(x86, x64, arm64);

  inline void replaceLaneFloat32x4(unsigned lane, FloatRegister lhs,
                                   FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  inline void replaceLaneFloat32x4(unsigned lane, FloatRegister rhs,
                                   FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  inline void replaceLaneFloat64x2(unsigned lane, FloatRegister lhs,
                                   FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared);

  inline void replaceLaneFloat64x2(unsigned lane, FloatRegister rhs,
                                   FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);
   2435 
  // Shuffle - blend and permute with immediate indices, and its many
  // specializations.  Lane values other than those mentioned are illegal.

  // lane values 0..31
  inline void shuffleInt8x16(const uint8_t lanes[16], FloatRegister rhs,
                             FloatRegister lhsDest)
      DEFINED_ON(x86_shared, arm64);

  inline void shuffleInt8x16(const uint8_t lanes[16], FloatRegister lhs,
                             FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Lane values must be 0 (select from lhs) or FF (select from rhs).
  // The behavior is undefined for lane values that are neither 0 nor FF.
  // On x86_shared: it is required that lhs == dest.
  inline void blendInt8x16(const uint8_t lanes[16], FloatRegister lhs,
                           FloatRegister rhs, FloatRegister dest,
                           FloatRegister temp) DEFINED_ON(x86_shared);

  // Lane values must be 0 (select from lhs) or FF (select from rhs).
  // The behavior is undefined for lane values that are neither 0 nor FF.
  inline void blendInt8x16(const uint8_t lanes[16], FloatRegister lhs,
                           FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(arm64);

  // Lane values must be 0 (select from lhs) or FFFF (select from rhs).
  // The behavior is undefined for lane values that are neither 0 nor FFFF.
  // On x86_shared: it is required that lhs == dest.
  inline void blendInt16x8(const uint16_t lanes[8], FloatRegister lhs,
                           FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Mask lane values must be ~0 or 0. The former selects from lhs and the
  // latter from rhs.
  // The implementation works effectively for I8x16, I16x8, I32x4, and I64x2.
  inline void laneSelectSimd128(FloatRegister mask, FloatRegister lhs,
                                FloatRegister rhs, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2474 
  // Interleave (zip) the high or low halves of lhs and rhs into dest,
  // alternating lanes from the two inputs.

  inline void interleaveHighInt8x16(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveHighInt16x8(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveHighInt32x4(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveHighInt64x2(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveLowInt8x16(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveLowInt16x8(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveLowInt32x4(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void interleaveLowInt64x2(FloatRegister lhs, FloatRegister rhs,
                                   FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2506 
  // Permute - permute with immediate indices.

  // lane values 0..15
  inline void permuteInt8x16(const uint8_t lanes[16], FloatRegister src,
                             FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  // lane values 0..7
  inline void permuteInt16x8(const uint16_t lanes[8], FloatRegister src,
                             FloatRegister dest) DEFINED_ON(arm64);

  // lane values 0..3 [sic].
  // (High/low permutes each reorder only one 64-bit half of the vector, so
  // indices are relative to that half -- hence the 0..3 range.)
  inline void permuteHighInt16x8(const uint16_t lanes[4], FloatRegister src,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  // lane values 0..3.
  inline void permuteLowInt16x8(const uint16_t lanes[4], FloatRegister src,
                                FloatRegister dest) DEFINED_ON(x86_shared);

  // lane values 0..3
  inline void permuteInt32x4(const uint32_t lanes[4], FloatRegister src,
                             FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   2528 
  // Funnel shift by immediate count:
  //   low_16_bytes_of((lhs ++ rhs) >> shift*8), shift must be < 16
  inline void concatAndRightShiftSimd128(FloatRegister lhs, FloatRegister rhs,
                                         FloatRegister dest, uint32_t shift)
      DEFINED_ON(x86_shared, arm64);

  // Rotate right by immediate count:
  //   low_16_bytes_of((src ++ src) >> shift*8), shift must be < 16
  inline void rotateRightSimd128(FloatRegister src, FloatRegister dest,
                                 uint32_t shift) DEFINED_ON(arm64);

  // Shift bytes with immediate count, shifting in zeroes.  Shift count 0..15.
  // These shift the whole 128-bit value as a unit, not per-lane.

  inline void leftShiftSimd128(Imm32 count, FloatRegister src,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void rightShiftSimd128(Imm32 count, FloatRegister src,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2549 
  // Zero extend int values: widen the low lanes of src into the wider lane
  // format named by the suffix, filling the new high bits with zeroes.

  inline void zeroExtend8x16To16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void zeroExtend8x16To32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void zeroExtend8x16To64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void zeroExtend16x8To32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void zeroExtend16x8To64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
  inline void zeroExtend32x4To64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Reverse bytes in lanes.

  inline void reverseInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void reverseInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void reverseInt64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2575 
  // Swizzle - permute with variable indices.  `rhs` holds the lanes parameter.
  // The Relaxed variant maps to wasm relaxed-simd swizzle; its behavior for
  // out-of-range indices differs per platform -- see the spec.

  inline void swizzleInt8x16(FloatRegister lhs, FloatRegister rhs,
                             FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void swizzleInt8x16Relaxed(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2584 
  // Integer Add
  //
  // The SimdConstant overloads take an immediate right-hand operand and are
  // declared only for x86_shared.

  inline void addInt8x16(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void addInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void addInt16x8(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void addInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void addInt32x4(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void addInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void addInt64x2(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void addInt64x2(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  // Integer Subtract

  inline void subInt8x16(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void subInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void subInt16x8(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void subInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void subInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void subInt32x4(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void subInt64x2(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void subInt64x2(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   2636 
  // Integer Multiply
  //
  // There is no mulInt8x16: 8-bit lane multiply is not provided here.
  // The 64x2 multiplies need scratch registers (one temp on x86_shared,
  // two on arm64).

  inline void mulInt16x8(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void mulInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void mulInt32x4(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void mulInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  // On x86_shared, it is required that lhs == dest.
  inline void mulInt64x2(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest, FloatRegister temp)
      DEFINED_ON(x86_shared);

  inline void mulInt64x2(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest, FloatRegister temp)
      DEFINED_ON(x86_shared);

  inline void mulInt64x2(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest, FloatRegister temp1,
                         FloatRegister temp2) DEFINED_ON(arm64);

  // Note for the extMul opcodes, the NxM designation is for the input lanes;
  // the output lanes are twice as wide.
  inline void extMulLowInt8x16(FloatRegister lhs, FloatRegister rhs,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extMulHighInt8x16(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulLowInt8x16(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulHighInt8x16(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extMulLowInt16x8(FloatRegister lhs, FloatRegister rhs,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extMulHighInt16x8(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulLowInt16x8(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulHighInt16x8(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extMulLowInt32x4(FloatRegister lhs, FloatRegister rhs,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void extMulHighInt32x4(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulLowInt32x4(FloatRegister lhs, FloatRegister rhs,
                                       FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedExtMulHighInt32x4(FloatRegister lhs, FloatRegister rhs,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Saturating rounding Q-format multiply (wasm i16x8.q15mulr_sat_s).
  inline void q15MulrSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2717 
  // Integer Negate

  inline void negInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void negInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void negInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void negInt64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2731 
  // Saturating integer add.  Signed variants clamp to the signed lane range,
  // the unsigned* variants to the unsigned range.  Only 8x16 and 16x8 lane
  // sizes are provided.

  inline void addSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void addSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared);

  inline void unsignedAddSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedAddSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                                    FloatRegister dest) DEFINED_ON(x86_shared);

  inline void addSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void addSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared);

  inline void unsignedAddSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedAddSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                                    FloatRegister dest) DEFINED_ON(x86_shared);

  // Saturating integer subtract

  inline void subSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void subSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared);

  inline void unsignedSubSatInt8x16(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedSubSatInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                                    FloatRegister dest) DEFINED_ON(x86_shared);

  inline void subSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void subSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                            FloatRegister dest) DEFINED_ON(x86_shared);

  inline void unsignedSubSatInt16x8(FloatRegister lhs, FloatRegister rhs,
                                    FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedSubSatInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                                    FloatRegister dest) DEFINED_ON(x86_shared);
   2787 
  // Lane-wise integer minimum.  Signed and unsigned comparisons are provided
  // for 8x16, 16x8, and 32x4 lanes; no 64x2 variant exists here.

  inline void minInt8x16(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void minInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void unsignedMinInt8x16(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedMinInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  inline void minInt16x8(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void minInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void unsignedMinInt16x8(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedMinInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  inline void minInt32x4(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void minInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void unsignedMinInt32x4(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedMinInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  // Lane-wise integer maximum

  inline void maxInt8x16(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void maxInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void unsignedMaxInt8x16(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedMaxInt8x16(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  inline void maxInt16x8(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void maxInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void unsignedMaxInt16x8(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedMaxInt16x8(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);

  inline void maxInt32x4(FloatRegister lhs, FloatRegister rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared, arm64);

  inline void maxInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                         FloatRegister dest) DEFINED_ON(x86_shared);

  inline void unsignedMaxInt32x4(FloatRegister lhs, FloatRegister rhs,
                                 FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedMaxInt32x4(FloatRegister lhs, const SimdConstant& rhs,
                                 FloatRegister dest) DEFINED_ON(x86_shared);
   2869 
  // Lane-wise integer rounding average (unsigned only, 8x16 and 16x8).

  inline void unsignedAverageInt8x16(FloatRegister lhs, FloatRegister rhs,
                                     FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedAverageInt16x8(FloatRegister lhs, FloatRegister rhs,
                                     FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  // Lane-wise integer absolute value

  inline void absInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void absInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void absInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void absInt64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2893 
  // Left shift by scalar. Immediates and variable shifts must have been
  // masked; shifts of zero will work but may or may not generate code.
  //
  // Each lane size has up to three forms: an x86_shared two-address form
  // (rhs shift count, lhsDest), an arm64 three-operand form, and an
  // immediate-count (Imm32) form on both.  The 8x16 x86_shared form needs a
  // scratch vector register.

  inline void leftShiftInt8x16(Register rhs, FloatRegister lhsDest,
                               FloatRegister temp) DEFINED_ON(x86_shared);

  inline void leftShiftInt8x16(FloatRegister lhs, Register rhs,
                               FloatRegister dest) DEFINED_ON(arm64);

  inline void leftShiftInt8x16(Imm32 count, FloatRegister src,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void leftShiftInt16x8(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);

  inline void leftShiftInt16x8(FloatRegister lhs, Register rhs,
                               FloatRegister dest) DEFINED_ON(arm64);

  inline void leftShiftInt16x8(Imm32 count, FloatRegister src,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void leftShiftInt32x4(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);

  inline void leftShiftInt32x4(FloatRegister lhs, Register rhs,
                               FloatRegister dest) DEFINED_ON(arm64);

  inline void leftShiftInt32x4(Imm32 count, FloatRegister src,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void leftShiftInt64x2(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);

  inline void leftShiftInt64x2(FloatRegister lhs, Register rhs,
                               FloatRegister dest) DEFINED_ON(arm64);

  inline void leftShiftInt64x2(Imm32 count, FloatRegister src,
                               FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   2936 
  // Right shift by scalar. Immediates and variable shifts must have been
  // masked; shifts of zero will work but may or may not generate code.
  //
  // Plain forms are arithmetic (sign-propagating) shifts; the unsigned*
  // forms are logical (zero-filling) shifts.  The same per-arch overload
  // pattern as the left shifts applies.  Note: the signed 64x2 variable
  // shift on x86_shared needs a temp, and there is no arm64 two-address
  // form for it.

  inline void rightShiftInt8x16(Register rhs, FloatRegister lhsDest,
                                FloatRegister temp) DEFINED_ON(x86_shared);

  inline void rightShiftInt8x16(FloatRegister lhs, Register rhs,
                                FloatRegister dest) DEFINED_ON(arm64);

  inline void rightShiftInt8x16(Imm32 count, FloatRegister src,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedRightShiftInt8x16(Register rhs, FloatRegister lhsDest,
                                        FloatRegister temp)
      DEFINED_ON(x86_shared);

  inline void unsignedRightShiftInt8x16(FloatRegister lhs, Register rhs,
                                        FloatRegister dest) DEFINED_ON(arm64);

  inline void unsignedRightShiftInt8x16(Imm32 count, FloatRegister src,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void rightShiftInt16x8(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);

  inline void rightShiftInt16x8(FloatRegister lhs, Register rhs,
                                FloatRegister dest) DEFINED_ON(arm64);

  inline void rightShiftInt16x8(Imm32 count, FloatRegister src,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedRightShiftInt16x8(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);

  inline void unsignedRightShiftInt16x8(FloatRegister lhs, Register rhs,
                                        FloatRegister dest) DEFINED_ON(arm64);

  inline void unsignedRightShiftInt16x8(Imm32 count, FloatRegister src,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void rightShiftInt32x4(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);

  inline void rightShiftInt32x4(FloatRegister lhs, Register rhs,
                                FloatRegister dest) DEFINED_ON(arm64);

  inline void rightShiftInt32x4(Imm32 count, FloatRegister src,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void unsignedRightShiftInt32x4(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);

  inline void unsignedRightShiftInt32x4(FloatRegister lhs, Register rhs,
                                        FloatRegister dest) DEFINED_ON(arm64);

  inline void unsignedRightShiftInt32x4(Imm32 count, FloatRegister src,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void rightShiftInt64x2(Register rhs, FloatRegister lhsDest,
                                FloatRegister temp) DEFINED_ON(x86_shared);

  inline void rightShiftInt64x2(Imm32 count, FloatRegister src,
                                FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);

  inline void rightShiftInt64x2(FloatRegister lhs, Register rhs,
                                FloatRegister dest) DEFINED_ON(arm64);

  inline void unsignedRightShiftInt64x2(Register rhs, FloatRegister lhsDest)
      DEFINED_ON(x86_shared);

  inline void unsignedRightShiftInt64x2(FloatRegister lhs, Register rhs,
                                        FloatRegister dest) DEFINED_ON(arm64);

  inline void unsignedRightShiftInt64x2(Imm32 count, FloatRegister src,
                                        FloatRegister dest)
      DEFINED_ON(x86_shared, arm64);
   3020 
  // Sign replication operation: presumably broadcasts each lane's sign bit
  // across the whole lane (all-ones for negative, zero otherwise) -- confirm
  // in the x86_shared implementation.  x86_shared only.

  inline void signReplicationInt8x16(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared);

  inline void signReplicationInt16x8(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared);

  inline void signReplicationInt32x4(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared);

  inline void signReplicationInt64x2(FloatRegister src, FloatRegister dest)
      DEFINED_ON(x86_shared);
   3034 
   3035  // Bitwise and, or, xor, not
   3036 
   3037  inline void bitwiseAndSimd128(FloatRegister rhs, FloatRegister lhsDest)
   3038      DEFINED_ON(x86_shared, arm64);
   3039 
   3040  inline void bitwiseAndSimd128(FloatRegister lhs, FloatRegister rhs,
   3041                                FloatRegister dest)
   3042      DEFINED_ON(x86_shared, arm64);
   3043 
   3044  inline void bitwiseAndSimd128(FloatRegister lhs, const SimdConstant& rhs,
   3045                                FloatRegister dest) DEFINED_ON(x86_shared);
   3046 
   3047  inline void bitwiseOrSimd128(FloatRegister rhs, FloatRegister lhsDest)
   3048      DEFINED_ON(x86_shared, arm64);
   3049 
   3050  inline void bitwiseOrSimd128(FloatRegister lhs, FloatRegister rhs,
   3051                               FloatRegister dest)
   3052      DEFINED_ON(x86_shared, arm64);
   3053 
   3054  inline void bitwiseOrSimd128(FloatRegister lhs, const SimdConstant& rhs,
   3055                               FloatRegister dest) DEFINED_ON(x86_shared);
   3056 
   3057  inline void bitwiseXorSimd128(FloatRegister rhs, FloatRegister lhsDest)
   3058      DEFINED_ON(x86_shared, arm64);
   3059 
   3060  inline void bitwiseXorSimd128(FloatRegister lhs, FloatRegister rhs,
   3061                                FloatRegister dest)
   3062      DEFINED_ON(x86_shared, arm64);
   3063 
   3064  inline void bitwiseXorSimd128(FloatRegister lhs, const SimdConstant& rhs,
   3065                                FloatRegister dest) DEFINED_ON(x86_shared);
   3066 
   3067  inline void bitwiseNotSimd128(FloatRegister src, FloatRegister dest)
   3068      DEFINED_ON(x86_shared, arm64);
   3069 
   3070  // Bitwise AND with complement: dest = lhs & ~rhs, note only arm64 can do it.
   3071  inline void bitwiseAndNotSimd128(FloatRegister lhs, FloatRegister rhs,
   3072                                   FloatRegister lhsDest) DEFINED_ON(arm64);
   3073 
   3074  // Bitwise AND with complement: dest = ~lhs & rhs, note this is not what Wasm
   3075  // wants but what the x86 hardware offers.  Hence the name.
   3076 
   3077  inline void bitwiseNotAndSimd128(FloatRegister rhs, FloatRegister lhsDest)
   3078      DEFINED_ON(x86_shared, arm64);
   3079 
   3080  inline void bitwiseNotAndSimd128(FloatRegister lhs, FloatRegister rhs,
   3081                                   FloatRegister lhsDest)
   3082      DEFINED_ON(x86_shared);
   3083 
   3084  // Bitwise select
   3085 
   3086  inline void bitwiseSelectSimd128(FloatRegister mask, FloatRegister onTrue,
   3087                                   FloatRegister onFalse, FloatRegister dest,
   3088                                   FloatRegister temp) DEFINED_ON(x86_shared);
   3089 
   3090  inline void bitwiseSelectSimd128(FloatRegister onTrue, FloatRegister onFalse,
   3091                                   FloatRegister maskDest) DEFINED_ON(arm64);
   3092 
   3093  // Population count
   3094 
   3095  inline void popcntInt8x16(FloatRegister src, FloatRegister dest,
   3096                            FloatRegister temp) DEFINED_ON(x86_shared);
   3097 
   3098  inline void popcntInt8x16(FloatRegister src, FloatRegister dest)
   3099      DEFINED_ON(arm64);
   3100 
   3101  // Any lane true, ie, any bit set
   3102 
   3103  inline void anyTrueSimd128(FloatRegister src, Register dest)
   3104      DEFINED_ON(x86_shared, arm64);
   3105 
   3106  // All lanes true
   3107 
   3108  inline void allTrueInt8x16(FloatRegister src, Register dest)
   3109      DEFINED_ON(x86_shared, arm64);
   3110 
   3111  inline void allTrueInt16x8(FloatRegister src, Register dest)
   3112      DEFINED_ON(x86_shared, arm64);
   3113 
   3114  inline void allTrueInt32x4(FloatRegister src, Register dest)
   3115      DEFINED_ON(x86_shared, arm64);
   3116 
   3117  inline void allTrueInt64x2(FloatRegister src, Register dest)
   3118      DEFINED_ON(x86_shared, arm64);
   3119 
   3120  // Bitmask, ie extract and compress high bits of all lanes
   3121 
   3122  inline void bitmaskInt8x16(FloatRegister src, Register dest)
   3123      DEFINED_ON(x86_shared);
   3124 
   3125  inline void bitmaskInt8x16(FloatRegister src, Register dest,
   3126                             FloatRegister temp) DEFINED_ON(arm64);
   3127 
   3128  inline void bitmaskInt16x8(FloatRegister src, Register dest)
   3129      DEFINED_ON(x86_shared);
   3130 
   3131  inline void bitmaskInt16x8(FloatRegister src, Register dest,
   3132                             FloatRegister temp) DEFINED_ON(arm64);
   3133 
   3134  inline void bitmaskInt32x4(FloatRegister src, Register dest)
   3135      DEFINED_ON(x86_shared);
   3136 
   3137  inline void bitmaskInt32x4(FloatRegister src, Register dest,
   3138                             FloatRegister temp) DEFINED_ON(arm64);
   3139 
   3140  inline void bitmaskInt64x2(FloatRegister src, Register dest)
   3141      DEFINED_ON(x86_shared);
   3142 
   3143  inline void bitmaskInt64x2(FloatRegister src, Register dest,
   3144                             FloatRegister temp) DEFINED_ON(arm64);
   3145 
   3146  // Comparisons (integer and floating-point)
   3147 
   3148  inline void compareInt8x16(Assembler::Condition cond, FloatRegister rhs,
   3149                             FloatRegister lhsDest)
   3150      DEFINED_ON(x86_shared, arm64);
   3151 
   3152  // On x86_shared, limited to !=, ==, <=, >
   3153  inline void compareInt8x16(Assembler::Condition cond, FloatRegister lhs,
   3154                             const SimdConstant& rhs, FloatRegister dest)
   3155      DEFINED_ON(x86_shared);
   3156 
   3157  // On arm64, use any integer comparison condition.
   3158  inline void compareInt8x16(Assembler::Condition cond, FloatRegister lhs,
   3159                             FloatRegister rhs, FloatRegister dest)
   3160      DEFINED_ON(x86_shared, arm64);
   3161 
   3162  inline void compareInt16x8(Assembler::Condition cond, FloatRegister rhs,
   3163                             FloatRegister lhsDest)
   3164      DEFINED_ON(x86_shared, arm64);
   3165 
   3166  inline void compareInt16x8(Assembler::Condition cond, FloatRegister lhs,
   3167                             FloatRegister rhs, FloatRegister dest)
   3168      DEFINED_ON(x86_shared, arm64);
   3169 
   3170  // On x86_shared, limited to !=, ==, <=, >
   3171  inline void compareInt16x8(Assembler::Condition cond, FloatRegister lhs,
   3172                             const SimdConstant& rhs, FloatRegister dest)
   3173      DEFINED_ON(x86_shared);
   3174 
   3175  // On x86_shared, limited to !=, ==, <=, >
   3176  inline void compareInt32x4(Assembler::Condition cond, FloatRegister rhs,
   3177                             FloatRegister lhsDest)
   3178      DEFINED_ON(x86_shared, arm64);
   3179 
   3180  inline void compareInt32x4(Assembler::Condition cond, FloatRegister lhs,
   3181                             const SimdConstant& rhs, FloatRegister dest)
   3182      DEFINED_ON(x86_shared);
   3183 
   3184  // On arm64, use any integer comparison condition.
   3185  inline void compareInt32x4(Assembler::Condition cond, FloatRegister lhs,
   3186                             FloatRegister rhs, FloatRegister dest)
   3187      DEFINED_ON(x86_shared, arm64);
   3188 
   3189  inline void compareForEqualityInt64x2(Assembler::Condition cond,
   3190                                        FloatRegister lhs, FloatRegister rhs,
   3191                                        FloatRegister dest)
   3192      DEFINED_ON(x86_shared);
   3193 
   3194  inline void compareForOrderingInt64x2(Assembler::Condition cond,
   3195                                        FloatRegister lhs, FloatRegister rhs,
   3196                                        FloatRegister dest, FloatRegister temp1,
   3197                                        FloatRegister temp2)
   3198      DEFINED_ON(x86_shared);
   3199 
   3200  inline void compareInt64x2(Assembler::Condition cond, FloatRegister rhs,
   3201                             FloatRegister lhsDest) DEFINED_ON(arm64);
   3202 
   3203  inline void compareInt64x2(Assembler::Condition cond, FloatRegister lhs,
   3204                             FloatRegister rhs, FloatRegister dest)
   3205      DEFINED_ON(arm64);
   3206 
   3207  inline void compareFloat32x4(Assembler::Condition cond, FloatRegister rhs,
   3208                               FloatRegister lhsDest)
   3209      DEFINED_ON(x86_shared, arm64);
   3210 
   3211  // On x86_shared, limited to ==, !=, <, <=
   3212  inline void compareFloat32x4(Assembler::Condition cond, FloatRegister lhs,
   3213                               const SimdConstant& rhs, FloatRegister dest)
   3214      DEFINED_ON(x86_shared);
   3215 
   3216  // On x86_shared, limited to ==, !=, <, <=
   3217  // On arm64, use any floating-point comparison condition.
   3218  inline void compareFloat32x4(Assembler::Condition cond, FloatRegister lhs,
   3219                               FloatRegister rhs, FloatRegister dest)
   3220      DEFINED_ON(x86_shared, arm64);
   3221 
   3222  inline void compareFloat64x2(Assembler::Condition cond, FloatRegister rhs,
   3223                               FloatRegister lhsDest)
   3224      DEFINED_ON(x86_shared, arm64);
   3225 
   3226  // On x86_shared, limited to ==, !=, <, <=
   3227  inline void compareFloat64x2(Assembler::Condition cond, FloatRegister lhs,
   3228                               const SimdConstant& rhs, FloatRegister dest)
   3229      DEFINED_ON(x86_shared);
   3230 
   3231  // On x86_shared, limited to ==, !=, <, <=
   3232  // On arm64, use any floating-point comparison condition.
   3233  inline void compareFloat64x2(Assembler::Condition cond, FloatRegister lhs,
   3234                               FloatRegister rhs, FloatRegister dest)
   3235      DEFINED_ON(x86_shared, arm64);
   3236 
   3237  // Load
   3238 
   3239  inline void loadUnalignedSimd128(const Operand& src, FloatRegister dest)
   3240      DEFINED_ON(x86_shared);
   3241 
   3242  inline FaultingCodeOffset loadUnalignedSimd128(const Address& src,
   3243                                                 FloatRegister dest)
   3244      DEFINED_ON(x86_shared, arm64);
   3245 
   3246  inline FaultingCodeOffset loadUnalignedSimd128(const BaseIndex& src,
   3247                                                 FloatRegister dest)
   3248      DEFINED_ON(x86_shared, arm64);
   3249 
   3250  // Store
   3251 
   3252  inline FaultingCodeOffset storeUnalignedSimd128(FloatRegister src,
   3253                                                  const Address& dest)
   3254      DEFINED_ON(x86_shared, arm64);
   3255 
   3256  inline FaultingCodeOffset storeUnalignedSimd128(FloatRegister src,
   3257                                                  const BaseIndex& dest)
   3258      DEFINED_ON(x86_shared, arm64);
   3259 
   3260  // Floating point negation
   3261 
   3262  inline void negFloat32x4(FloatRegister src, FloatRegister dest)
   3263      DEFINED_ON(x86_shared, arm64);
   3264 
   3265  inline void negFloat64x2(FloatRegister src, FloatRegister dest)
   3266      DEFINED_ON(x86_shared, arm64);
   3267 
   3268  // Floating point absolute value
   3269 
   3270  inline void absFloat32x4(FloatRegister src, FloatRegister dest)
   3271      DEFINED_ON(x86_shared, arm64);
   3272 
   3273  inline void absFloat64x2(FloatRegister src, FloatRegister dest)
   3274      DEFINED_ON(x86_shared, arm64);
   3275 
   3276  // NaN-propagating minimum
   3277 
   3278  inline void minFloat32x4(FloatRegister lhs, FloatRegister rhs,
   3279                           FloatRegister dest, FloatRegister temp1,
   3280                           FloatRegister temp2) DEFINED_ON(x86_shared);
   3281 
   3282  inline void minFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
   3283      DEFINED_ON(arm64);
   3284 
   3285  inline void minFloat32x4(FloatRegister lhs, FloatRegister rhs,
   3286                           FloatRegister dest) DEFINED_ON(arm64);
   3287 
   3288  inline void minFloat64x2(FloatRegister lhs, FloatRegister rhs,
   3289                           FloatRegister dest, FloatRegister temp1,
   3290                           FloatRegister temp2) DEFINED_ON(x86_shared);
   3291 
   3292  inline void minFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
   3293      DEFINED_ON(arm64);
   3294 
   3295  inline void minFloat64x2(FloatRegister lhs, FloatRegister rhs,
   3296                           FloatRegister dest) DEFINED_ON(arm64);
   3297 
   3298  // NaN-propagating maximum
   3299 
   3300  inline void maxFloat32x4(FloatRegister lhs, FloatRegister rhs,
   3301                           FloatRegister dest, FloatRegister temp1,
   3302                           FloatRegister temp2) DEFINED_ON(x86_shared);
   3303 
   3304  inline void maxFloat32x4(FloatRegister rhs, FloatRegister lhsDest)
   3305      DEFINED_ON(arm64);
   3306 
   3307  inline void maxFloat32x4(FloatRegister lhs, FloatRegister rhs,
   3308                           FloatRegister dest) DEFINED_ON(arm64);
   3309 
   3310  inline void maxFloat64x2(FloatRegister lhs, FloatRegister rhs,
   3311                           FloatRegister dest, FloatRegister temp1,
   3312                           FloatRegister temp2) DEFINED_ON(x86_shared);
   3313 
   3314  inline void maxFloat64x2(FloatRegister rhs, FloatRegister lhsDest)
   3315      DEFINED_ON(arm64);
   3316 
   3317  inline void maxFloat64x2(FloatRegister lhs, FloatRegister rhs,
   3318                           FloatRegister dest) DEFINED_ON(arm64);
   3319 
   3320  // Floating add
   3321 
   3322  inline void addFloat32x4(FloatRegister lhs, FloatRegister rhs,
   3323                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3324 
   3325  inline void addFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
   3326                           FloatRegister dest) DEFINED_ON(x86_shared);
   3327 
   3328  inline void addFloat64x2(FloatRegister lhs, FloatRegister rhs,
   3329                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3330 
   3331  inline void addFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
   3332                           FloatRegister dest) DEFINED_ON(x86_shared);
   3333 
   3334  // Floating subtract
   3335 
   3336  inline void subFloat32x4(FloatRegister lhs, FloatRegister rhs,
   3337                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3338 
   3339  inline void subFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
   3340                           FloatRegister dest) DEFINED_ON(x86_shared);
   3341 
   3342  inline void subFloat64x2(FloatRegister lhs, FloatRegister rhs,
   3343                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3344 
   3345  inline void subFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
   3346                           FloatRegister dest) DEFINED_ON(x86_shared);
   3347 
   3348  // Floating division
   3349 
   3350  inline void divFloat32x4(FloatRegister lhs, FloatRegister rhs,
   3351                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3352 
   3353  inline void divFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
   3354                           FloatRegister dest) DEFINED_ON(x86_shared);
   3355 
   3356  inline void divFloat64x2(FloatRegister lhs, FloatRegister rhs,
   3357                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3358 
   3359  inline void divFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
   3360                           FloatRegister dest) DEFINED_ON(x86_shared);
   3361 
   3362  // Floating multiply
   3363 
   3364  inline void mulFloat32x4(FloatRegister lhs, FloatRegister rhs,
   3365                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3366 
   3367  inline void mulFloat32x4(FloatRegister lhs, const SimdConstant& rhs,
   3368                           FloatRegister dest) DEFINED_ON(x86_shared);
   3369 
   3370  inline void mulFloat64x2(FloatRegister lhs, FloatRegister rhs,
   3371                           FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3372 
   3373  inline void mulFloat64x2(FloatRegister lhs, const SimdConstant& rhs,
   3374                           FloatRegister dest) DEFINED_ON(x86_shared);
   3375 
   3376  // Pairwise add
   3377 
   3378  inline void extAddPairwiseInt8x16(FloatRegister src, FloatRegister dest)
   3379      DEFINED_ON(x86_shared, arm64);
   3380 
   3381  inline void unsignedExtAddPairwiseInt8x16(FloatRegister src,
   3382                                            FloatRegister dest)
   3383      DEFINED_ON(x86_shared, arm64);
   3384 
   3385  inline void extAddPairwiseInt16x8(FloatRegister src, FloatRegister dest)
   3386      DEFINED_ON(x86_shared, arm64);
   3387 
   3388  inline void unsignedExtAddPairwiseInt16x8(FloatRegister src,
   3389                                            FloatRegister dest)
   3390      DEFINED_ON(x86_shared, arm64);
   3391 
   3392  // Floating square root
   3393 
   3394  inline void sqrtFloat32x4(FloatRegister src, FloatRegister dest)
   3395      DEFINED_ON(x86_shared, arm64);
   3396 
   3397  inline void sqrtFloat64x2(FloatRegister src, FloatRegister dest)
   3398      DEFINED_ON(x86_shared, arm64);
   3399 
   3400  // Integer to floating point with rounding
   3401 
   3402  inline void convertInt32x4ToFloat32x4(FloatRegister src, FloatRegister dest)
   3403      DEFINED_ON(x86_shared, arm64);
   3404 
   3405  inline void unsignedConvertInt32x4ToFloat32x4(FloatRegister src,
   3406                                                FloatRegister dest)
   3407      DEFINED_ON(x86_shared, arm64);
   3408 
   3409  inline void convertInt32x4ToFloat64x2(FloatRegister src, FloatRegister dest)
   3410      DEFINED_ON(x86_shared, arm64);
   3411 
   3412  inline void unsignedConvertInt32x4ToFloat64x2(FloatRegister src,
   3413                                                FloatRegister dest)
   3414      DEFINED_ON(x86_shared, arm64);
   3415 
   3416  // Floating point to integer with saturation
   3417 
   3418  inline void truncSatFloat32x4ToInt32x4(FloatRegister src, FloatRegister dest)
   3419      DEFINED_ON(x86_shared, arm64);
   3420 
   3421  inline void unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src,
   3422                                                 FloatRegister dest,
   3423                                                 FloatRegister temp)
   3424      DEFINED_ON(x86_shared);
   3425 
   3426  inline void unsignedTruncSatFloat32x4ToInt32x4(FloatRegister src,
   3427                                                 FloatRegister dest)
   3428      DEFINED_ON(arm64);
   3429 
   3430  inline void truncSatFloat64x2ToInt32x4(FloatRegister src, FloatRegister dest,
   3431                                         FloatRegister temp)
   3432      DEFINED_ON(x86_shared, arm64);
   3433 
   3434  inline void unsignedTruncSatFloat64x2ToInt32x4(FloatRegister src,
   3435                                                 FloatRegister dest,
   3436                                                 FloatRegister temp)
   3437      DEFINED_ON(x86_shared, arm64);
   3438 
   3439  inline void truncFloat32x4ToInt32x4Relaxed(FloatRegister src,
   3440                                             FloatRegister dest)
   3441      DEFINED_ON(x86_shared, arm64);
   3442 
   3443  inline void unsignedTruncFloat32x4ToInt32x4Relaxed(FloatRegister src,
   3444                                                     FloatRegister dest)
   3445      DEFINED_ON(x86_shared, arm64);
   3446 
   3447  inline void truncFloat64x2ToInt32x4Relaxed(FloatRegister src,
   3448                                             FloatRegister dest)
   3449      DEFINED_ON(x86_shared, arm64);
   3450 
   3451  inline void unsignedTruncFloat64x2ToInt32x4Relaxed(FloatRegister src,
   3452                                                     FloatRegister dest)
   3453      DEFINED_ON(x86_shared, arm64);
   3454 
   3455  // Floating point narrowing
   3456 
   3457  inline void convertFloat64x2ToFloat32x4(FloatRegister src, FloatRegister dest)
   3458      DEFINED_ON(x86_shared, arm64);
   3459 
   3460  // Floating point widening
   3461 
   3462  inline void convertFloat32x4ToFloat64x2(FloatRegister src, FloatRegister dest)
   3463      DEFINED_ON(x86_shared, arm64);
   3464 
   3465  // Integer to integer narrowing
   3466 
   3467  inline void narrowInt16x8(FloatRegister lhs, const SimdConstant& rhs,
   3468                            FloatRegister dest) DEFINED_ON(x86_shared);
   3469 
   3470  inline void narrowInt16x8(FloatRegister lhs, FloatRegister rhs,
   3471                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3472 
   3473  inline void unsignedNarrowInt16x8(FloatRegister lhs, const SimdConstant& rhs,
   3474                                    FloatRegister dest) DEFINED_ON(x86_shared);
   3475 
   3476  inline void unsignedNarrowInt16x8(FloatRegister lhs, FloatRegister rhs,
   3477                                    FloatRegister dest)
   3478      DEFINED_ON(x86_shared, arm64);
   3479 
   3480  inline void narrowInt32x4(FloatRegister lhs, const SimdConstant& rhs,
   3481                            FloatRegister dest) DEFINED_ON(x86_shared);
   3482 
   3483  inline void narrowInt32x4(FloatRegister lhs, FloatRegister rhs,
   3484                            FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3485 
   3486  inline void unsignedNarrowInt32x4(FloatRegister lhs, const SimdConstant& rhs,
   3487                                    FloatRegister dest) DEFINED_ON(x86_shared);
   3488 
   3489  inline void unsignedNarrowInt32x4(FloatRegister lhs, FloatRegister rhs,
   3490                                    FloatRegister dest)
   3491      DEFINED_ON(x86_shared, arm64);
   3492 
   3493  // Integer to integer widening
   3494 
   3495  inline void widenLowInt8x16(FloatRegister src, FloatRegister dest)
   3496      DEFINED_ON(x86_shared, arm64);
   3497 
   3498  inline void widenHighInt8x16(FloatRegister src, FloatRegister dest)
   3499      DEFINED_ON(x86_shared, arm64);
   3500 
   3501  inline void unsignedWidenLowInt8x16(FloatRegister src, FloatRegister dest)
   3502      DEFINED_ON(x86_shared, arm64);
   3503 
   3504  inline void unsignedWidenHighInt8x16(FloatRegister src, FloatRegister dest)
   3505      DEFINED_ON(x86_shared, arm64);
   3506 
   3507  inline void widenLowInt16x8(FloatRegister src, FloatRegister dest)
   3508      DEFINED_ON(x86_shared, arm64);
   3509 
   3510  inline void widenHighInt16x8(FloatRegister src, FloatRegister dest)
   3511      DEFINED_ON(x86_shared, arm64);
   3512 
   3513  inline void unsignedWidenLowInt16x8(FloatRegister src, FloatRegister dest)
   3514      DEFINED_ON(x86_shared, arm64);
   3515 
   3516  inline void unsignedWidenHighInt16x8(FloatRegister src, FloatRegister dest)
   3517      DEFINED_ON(x86_shared, arm64);
   3518 
   3519  inline void widenLowInt32x4(FloatRegister src, FloatRegister dest)
   3520      DEFINED_ON(x86_shared, arm64);
   3521 
   3522  inline void unsignedWidenLowInt32x4(FloatRegister src, FloatRegister dest)
   3523      DEFINED_ON(x86_shared, arm64);
   3524 
   3525  inline void widenHighInt32x4(FloatRegister src, FloatRegister dest)
   3526      DEFINED_ON(x86_shared, arm64);
   3527 
   3528  inline void unsignedWidenHighInt32x4(FloatRegister src, FloatRegister dest)
   3529      DEFINED_ON(x86_shared, arm64);
   3530 
   3531  // Compare-based minimum/maximum
   3532  //
   3533  // On x86, the signature is (rhsDest, lhs); on arm64 it is (rhs, lhsDest).
   3534  //
   3535  // The masm preprocessor can't deal with multiple declarations with identical
   3536  // signatures even if they are on different platforms, hence the weird
   3537  // argument names.
   3538 
   3539  inline void pseudoMinFloat32x4(FloatRegister rhsOrRhsDest,
   3540                                 FloatRegister lhsOrLhsDest)
   3541      DEFINED_ON(x86_shared, arm64);
   3542 
   3543  inline void pseudoMinFloat32x4(FloatRegister lhs, FloatRegister rhs,
   3544                                 FloatRegister dest)
   3545      DEFINED_ON(x86_shared, arm64);
   3546 
   3547  inline void pseudoMinFloat64x2(FloatRegister rhsOrRhsDest,
   3548                                 FloatRegister lhsOrLhsDest)
   3549      DEFINED_ON(x86_shared, arm64);
   3550 
   3551  inline void pseudoMinFloat64x2(FloatRegister lhs, FloatRegister rhs,
   3552                                 FloatRegister dest)
   3553      DEFINED_ON(x86_shared, arm64);
   3554 
   3555  inline void pseudoMaxFloat32x4(FloatRegister rhsOrRhsDest,
   3556                                 FloatRegister lhsOrLhsDest)
   3557      DEFINED_ON(x86_shared, arm64);
   3558 
   3559  inline void pseudoMaxFloat32x4(FloatRegister lhs, FloatRegister rhs,
   3560                                 FloatRegister dest)
   3561      DEFINED_ON(x86_shared, arm64);
   3562 
   3563  inline void pseudoMaxFloat64x2(FloatRegister rhsOrRhsDest,
   3564                                 FloatRegister lhsOrLhsDest)
   3565      DEFINED_ON(x86_shared, arm64);
   3566 
   3567  inline void pseudoMaxFloat64x2(FloatRegister lhs, FloatRegister rhs,
   3568                                 FloatRegister dest)
   3569      DEFINED_ON(x86_shared, arm64);
   3570 
   3571  // Widening/pairwise integer dot product
   3572 
   3573  inline void widenDotInt16x8(FloatRegister lhs, FloatRegister rhs,
   3574                              FloatRegister dest) DEFINED_ON(x86_shared, arm64);
   3575 
   3576  inline void widenDotInt16x8(FloatRegister lhs, const SimdConstant& rhs,
   3577                              FloatRegister dest) DEFINED_ON(x86_shared);
   3578 
   3579  inline void dotInt8x16Int7x16(FloatRegister lhs, FloatRegister rhs,
   3580                                FloatRegister dest)
   3581      DEFINED_ON(x86_shared, arm64);
   3582 
   3583  inline void dotInt8x16Int7x16ThenAdd(FloatRegister lhs, FloatRegister rhs,
   3584                                       FloatRegister dest)
   3585      DEFINED_ON(x86_shared);
   3586 
   3587  inline void dotInt8x16Int7x16ThenAdd(FloatRegister lhs, FloatRegister rhs,
   3588                                       FloatRegister dest, FloatRegister temp)
   3589      DEFINED_ON(arm64);
   3590 
   3591  // Floating point rounding
   3592 
   3593  inline void ceilFloat32x4(FloatRegister src, FloatRegister dest)
   3594      DEFINED_ON(x86_shared, arm64);
   3595 
   3596  inline void ceilFloat64x2(FloatRegister src, FloatRegister dest)
   3597      DEFINED_ON(x86_shared, arm64);
   3598 
   3599  inline void floorFloat32x4(FloatRegister src, FloatRegister dest)
   3600      DEFINED_ON(x86_shared, arm64);
   3601 
   3602  inline void floorFloat64x2(FloatRegister src, FloatRegister dest)
   3603      DEFINED_ON(x86_shared, arm64);
   3604 
   3605  inline void truncFloat32x4(FloatRegister src, FloatRegister dest)
   3606      DEFINED_ON(x86_shared, arm64);
   3607 
   3608  inline void truncFloat64x2(FloatRegister src, FloatRegister dest)
   3609      DEFINED_ON(x86_shared, arm64);
   3610 
   3611  inline void nearestFloat32x4(FloatRegister src, FloatRegister dest)
   3612      DEFINED_ON(x86_shared, arm64);
   3613 
   3614  inline void nearestFloat64x2(FloatRegister src, FloatRegister dest)
   3615      DEFINED_ON(x86_shared, arm64);
   3616 
   3617  // Floating multiply-accumulate: srcDest [+-]= src1 * src2
   3618 
   3619  inline void fmaFloat32x4(FloatRegister src1, FloatRegister src2,
   3620                           FloatRegister srcDest) DEFINED_ON(x86_shared, arm64);
   3621 
   3622  inline void fnmaFloat32x4(FloatRegister src1, FloatRegister src2,
   3623                            FloatRegister srcDest)
   3624      DEFINED_ON(x86_shared, arm64);
   3625 
   3626  inline void fmaFloat64x2(FloatRegister src1, FloatRegister src2,
   3627                           FloatRegister srcDest) DEFINED_ON(x86_shared, arm64);
   3628 
   3629  inline void fnmaFloat64x2(FloatRegister src1, FloatRegister src2,
   3630                            FloatRegister srcDest)
   3631      DEFINED_ON(x86_shared, arm64);
   3632 
   3633  inline void minFloat32x4Relaxed(FloatRegister src, FloatRegister srcDest)
   3634      DEFINED_ON(x86_shared, arm64);
   3635 
   3636  inline void minFloat32x4Relaxed(FloatRegister lhs, FloatRegister rhs,
   3637                                  FloatRegister dest)
   3638      DEFINED_ON(x86_shared, arm64);
   3639 
   3640  inline void maxFloat32x4Relaxed(FloatRegister src, FloatRegister srcDest)
   3641      DEFINED_ON(x86_shared, arm64);
   3642 
   3643  inline void maxFloat32x4Relaxed(FloatRegister lhs, FloatRegister rhs,
   3644                                  FloatRegister dest)
   3645      DEFINED_ON(x86_shared, arm64);
   3646 
   3647  inline void minFloat64x2Relaxed(FloatRegister src, FloatRegister srcDest)
   3648      DEFINED_ON(x86_shared, arm64);
   3649 
   3650  inline void minFloat64x2Relaxed(FloatRegister lhs, FloatRegister rhs,
   3651                                  FloatRegister dest)
   3652      DEFINED_ON(x86_shared, arm64);
   3653 
   3654  inline void maxFloat64x2Relaxed(FloatRegister src, FloatRegister srcDest)
   3655      DEFINED_ON(x86_shared, arm64);
   3656 
   3657  inline void maxFloat64x2Relaxed(FloatRegister lhs, FloatRegister rhs,
   3658                                  FloatRegister dest)
   3659      DEFINED_ON(x86_shared, arm64);
   3660 
   3661  inline void q15MulrInt16x8Relaxed(FloatRegister lhs, FloatRegister rhs,
   3662                                    FloatRegister dest)
   3663      DEFINED_ON(x86_shared, arm64);
   3664 
   3665 public:
   3666  // ========================================================================
   3667  // Truncate floating point.
   3668 
   3669  // Undefined behaviour when truncation is outside Int64 range.
   3670  // Needs a temp register if SSE3 is not present.
   3671  inline void truncateFloat32ToInt64(Address src, Address dest, Register temp)
   3672      DEFINED_ON(x86_shared);
   3673  inline void truncateFloat32ToUInt64(Address src, Address dest, Register temp,
   3674                                      FloatRegister floatTemp)
   3675      DEFINED_ON(x86, x64);
   3676  inline void truncateDoubleToInt64(Address src, Address dest, Register temp)
   3677      DEFINED_ON(x86_shared);
   3678  inline void truncateDoubleToUInt64(Address src, Address dest, Register temp,
   3679                                     FloatRegister floatTemp)
   3680      DEFINED_ON(x86, x64);
   3681 
   3682 public:
   3683  // ========================================================================
   3684  // Convert floating point.
   3685 
   3686  // temp required on x86 and x64; must be undefined on mips64 and loong64.
   3687  void convertUInt64ToFloat32(Register64 src, FloatRegister dest, Register temp)
   3688      DEFINED_ON(arm64, mips64, loong64, riscv64, wasm32, x64, x86);
   3689 
   3690  void convertInt64ToFloat32(Register64 src, FloatRegister dest)
   3691      DEFINED_ON(arm64, mips64, loong64, riscv64, wasm32, x64, x86);
   3692 
   3693  bool convertUInt64ToDoubleNeedsTemp() PER_ARCH;
   3694 
   3695  // temp required when convertUInt64ToDoubleNeedsTemp() returns true.
   3696  void convertUInt64ToDouble(Register64 src, FloatRegister dest,
   3697                             Register temp) PER_ARCH;
   3698 
   3699  void convertInt64ToDouble(Register64 src, FloatRegister dest) PER_ARCH;
   3700 
   3701  void convertIntPtrToDouble(Register src, FloatRegister dest) PER_ARCH;
   3702 
   3703 public:
   3704  // ========================================================================
   3705  // wasm support
   3706 
   3707  FaultingCodeOffset wasmTrapInstruction() PER_SHARED_ARCH;
   3708 
   3709  void wasmTrap(wasm::Trap trap, const wasm::TrapSiteDesc& trapSiteDesc);
   3710 
   3711  // Load all pinned regs via InstanceReg.  If `trapSiteDesc` is something,
   3712  // give the first load a trap descriptor with type IndirectCallToNull, so that
   3713  // a null instance will cause a trap.
   3714  void loadWasmPinnedRegsFromInstance(
   3715      const wasm::MaybeTrapSiteDesc& trapSiteDesc);
   3716 
   3717  // Branches to the fail label if the stack would overflow the current stack
   3718  // limit. Returns the number of extra bytes of stack allocated prior to
   3719  // branching to the fail label.
   3720  uint32_t wasmReserveStackChecked(uint32_t amount, Label* fail);
   3721 
   3722  // Emit a bounds check against the wasm heap limit, jumping to 'ok' if 'cond'
   3723  // holds; this can be the label either of the access or of the trap.  The
   3724  // label should name a code position greater than the position of the bounds
   3725  // check.
   3726  //
   3727  // If JitOptions.spectreMaskIndex is true, a no-op speculation barrier is
   3728  // emitted in the code stream after the check to prevent an OOB access from
   3729  // being executed speculatively.  (On current tier-1 platforms the barrier is
   3730  // a conditional saturation of 'index' to 'boundsCheckLimit', using the same
   3731  // condition as the check.)  If the condition is such that the bounds check
   3732  // branches out of line to the trap, the barrier will actually be executed
   3733  // when the bounds check passes.
   3734  //
   3735  // On 32-bit systems for both wasm and asm.js, and on 64-bit systems for
   3736  // asm.js, heap lengths are limited to 2GB.  On 64-bit systems for wasm,
   3737  // 32-bit heap lengths are limited to 4GB, and 64-bit heap lengths will be
   3738  // limited to something much larger.
   3739 
   3740  void wasmBoundsCheck32(Condition cond, Register index,
   3741                         Register boundsCheckLimit,
   3742                         Label* label) PER_SHARED_ARCH;
   3743 
   3744  void wasmBoundsCheck32(Condition cond, Register index,
   3745                         Address boundsCheckLimit,
   3746                         Label* label) PER_SHARED_ARCH;
   3747 
   3748  void wasmBoundsCheck64(Condition cond, Register64 index,
   3749                         Register64 boundsCheckLimit, Label* label) PER_ARCH;
   3750 
   3751  void wasmBoundsCheck64(Condition cond, Register64 index,
   3752                         Address boundsCheckLimit, Label* label) PER_ARCH;
   3753 
   3754  // Each wasm load/store instruction appends its own wasm::Trap::OutOfBounds.
   3755  void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr,
   3756                AnyRegister out) DEFINED_ON(x86, x64);
   3757  void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr,
   3758                   Register64 out) DEFINED_ON(x86, x64);
   3759  void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
   3760                 Operand dstAddr) DEFINED_ON(x86, x64);
   3761  void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
   3762                    Operand dstAddr) DEFINED_ON(x86);
   3763 
   3764  // For all the ARM/MIPS/LOONG64/RISCV64 wasmLoad and wasmStore functions
   3765  // below, `ptr` MUST equal `ptrScratch`, and that register will be updated
   3766  // based on conditions listed below (where it is only mentioned as `ptr`).
   3767 
   3768  // `ptr` will be updated if access.offset32() != 0 or access.type() ==
   3769  // Scalar::Int64.
   3770  void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase,
   3771                Register ptr, Register ptrScratch, AnyRegister output)
   3772      DEFINED_ON(arm, loong64, riscv64, mips64);
   3773  void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase,
   3774                   Register ptr, Register ptrScratch, Register64 output)
   3775      DEFINED_ON(arm, mips64, loong64, riscv64);
   3776  void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
   3777                 Register memoryBase, Register ptr, Register ptrScratch)
   3778      DEFINED_ON(arm, loong64, riscv64, mips64);
   3779  void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
   3780                    Register memoryBase, Register ptr, Register ptrScratch)
   3781      DEFINED_ON(arm, mips64, loong64, riscv64);
   3782 
   3783  // These accept general memoryBase + ptr + offset (in `access`); the offset is
   3784  // always smaller than the guard region.  They will insert an additional add
   3785  // if the offset is nonzero, and of course that add may require a temporary
   3786  // register for the offset if the offset is large, and instructions to set it
   3787  // up.
   3788  void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase,
   3789                Register ptr, AnyRegister output) DEFINED_ON(arm64);
   3790  void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase,
   3791                   Register ptr, Register64 output) DEFINED_ON(arm64);
   3792  void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
   3793                 Register memoryBase, Register ptr) DEFINED_ON(arm64);
   3794  void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
   3795                    Register memoryBase, Register ptr) DEFINED_ON(arm64);
   3796 
         // All the unaligned-access helpers below are mips64-only (see their
         // DEFINED_ON annotations).
   3797  // `ptr` will always be updated.
   3798  void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access,
   3799                         Register memoryBase, Register ptr, Register ptrScratch,
   3800                         Register output, Register tmp) DEFINED_ON(mips64);
   3801 
   3802  // MIPS: `ptr` will always be updated.
   3803  void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access,
   3804                           Register memoryBase, Register ptr,
   3805                           Register ptrScratch, FloatRegister output,
   3806                           Register tmp1) DEFINED_ON(mips64);
   3807 
   3808  // `ptr` will always be updated.
   3809  void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
   3810                            Register memoryBase, Register ptr,
   3811                            Register ptrScratch, Register64 output,
   3812                            Register tmp) DEFINED_ON(mips64);
   3813 
   3814  // MIPS: `ptr` will always be updated.
   3815  void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value,
   3816                          Register memoryBase, Register ptr,
   3817                          Register ptrScratch, Register tmp) DEFINED_ON(mips64);
   3818 
   3819  // `ptr` will always be updated.
   3820  void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access,
   3821                            FloatRegister floatValue, Register memoryBase,
   3822                            Register ptr, Register ptrScratch, Register tmp)
   3823      DEFINED_ON(mips64);
   3824 
   3825  // `ptr` will always be updated.
   3826  void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
   3827                             Register64 value, Register memoryBase,
   3828                             Register ptr, Register ptrScratch, Register tmp)
   3829      DEFINED_ON(mips64);
   3830 
   3831  // wasm specific methods, used in both the wasm baseline compiler and ion.
   3832 
   3833  // The truncate-to-int32 methods do not bind the rejoin label; clients must
   3834  // do so if oolWasmTruncateCheckF{64,32}ToI32() can jump to it.
   3835  void wasmTruncateDoubleToUInt32(FloatRegister input, Register output,
   3836                                  bool isSaturating, Label* oolEntry) PER_ARCH;
   3837  void wasmTruncateDoubleToInt32(FloatRegister input, Register output,
   3838                                 bool isSaturating,
   3839                                 Label* oolEntry) PER_SHARED_ARCH;
   3840  void oolWasmTruncateCheckF64ToI32(FloatRegister input, Register output,
   3841                                    TruncFlags flags,
   3842                                    const wasm::TrapSiteDesc& trapSiteDesc,
   3843                                    Label* rejoin) PER_SHARED_ARCH;
   3844 
   3845  void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output,
   3846                                   bool isSaturating, Label* oolEntry) PER_ARCH;
   3847  void wasmTruncateFloat32ToInt32(FloatRegister input, Register output,
   3848                                  bool isSaturating,
   3849                                  Label* oolEntry) PER_SHARED_ARCH;
   3850  void oolWasmTruncateCheckF32ToI32(FloatRegister input, Register output,
   3851                                    TruncFlags flags,
   3852                                    const wasm::TrapSiteDesc& trapSiteDesc,
   3853                                    Label* rejoin) PER_SHARED_ARCH;
   3854 
   3855  // The truncate-to-int64 methods will always bind the `oolRejoin` label
   3856  // after the last emitted instruction.
   3857  void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output,
   3858                                 bool isSaturating, Label* oolEntry,
   3859                                 Label* oolRejoin, FloatRegister tempDouble)
   3860      DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
   3861  void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output,
   3862                                  bool isSaturating, Label* oolEntry,
   3863                                  Label* oolRejoin, FloatRegister tempDouble)
   3864      DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
   3865  void oolWasmTruncateCheckF64ToI64(FloatRegister input, Register64 output,
   3866                                    TruncFlags flags,
   3867                                    const wasm::TrapSiteDesc& trapSiteDesc,
   3868                                    Label* rejoin) PER_SHARED_ARCH;
   3869 
   3870  void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output,
   3871                                  bool isSaturating, Label* oolEntry,
   3872                                  Label* oolRejoin, FloatRegister tempDouble)
   3873      DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
   3874  void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output,
   3875                                   bool isSaturating, Label* oolEntry,
   3876                                   Label* oolRejoin, FloatRegister tempDouble)
   3877      DEFINED_ON(arm64, x86, x64, mips64, loong64, riscv64, wasm32);
   3878  void oolWasmTruncateCheckF32ToI64(FloatRegister input, Register64 output,
   3879                                    TruncFlags flags,
   3880                                    const wasm::TrapSiteDesc& trapSiteDesc,
   3881                                    Label* rejoin) PER_SHARED_ARCH;
   3882 
   3883  // This function takes care of loading the callee's instance and pinned regs
   3884  // but it is the caller's responsibility to save/restore instance or pinned
   3885  // regs.
   3886  CodeOffset wasmCallImport(const wasm::CallSiteDesc& desc,
   3887                            const wasm::CalleeDesc& callee);
   3888 
   3889  CodeOffset wasmReturnCallImport(const wasm::CallSiteDesc& desc,
   3890                                  const wasm::CalleeDesc& callee,
   3891                                  const ReturnCallAdjustmentInfo& retCallInfo);
   3892 
   3893  CodeOffset wasmReturnCall(const wasm::CallSiteDesc& desc,
   3894                            uint32_t funcDefIndex,
   3895                            const ReturnCallAdjustmentInfo& retCallInfo);
   3896 
         // Frame adjustment for wasm tail calls.  The slow variant takes a call
         // site descriptor (`desc`); the fast variant does not.  NOTE(review):
         // semantics inferred from names/signatures — confirm against
         // wasmReturnCall* callers.
   3897  void wasmCollapseFrameSlow(const ReturnCallAdjustmentInfo& retCallInfo,
   3898                             wasm::CallSiteDesc desc);
   3899 
   3900  void wasmCollapseFrameFast(const ReturnCallAdjustmentInfo& retCallInfo);
   3901 
         // Tests the return address `ra` for the slow-call marker emitted by
         // wasmMarkCallAsSlow(); branches to `notSlow` otherwise.  NOTE(review):
         // inferred from the marker methods below — confirm.
   3902  void wasmCheckSlowCallsite(Register ra, Label* notSlow, Register temp1,
   3903                             Register temp2) PER_ARCH;
   3904 
   3905  // Places slow class marker for tail calls.
   3906  void wasmMarkCallAsSlow() PER_ARCH;
   3907 
   3908  // Combines slow class marker with actual assembler call.
   3909  CodeOffset wasmMarkedSlowCall(const wasm::CallSiteDesc& desc,
   3910                                const Register reg) PER_SHARED_ARCH;
   3911 
         // NOTE(review): presumably clamps a 64-bit table address to an index
         // usable in `out` — confirm clamp bound.
   3912  void wasmClampTable64Address(Register64 address, Register out);
   3913 
   3914  // WasmTableCallIndexReg must contain the index of the indirect call.  This is
   3915  // for wasm calls only.
   3916  //
   3917  // Indirect calls use a dual-path mechanism where a run-time test determines
   3918  // whether a context switch is needed (slow path) or not (fast path).  This
   3919  // gives rise to two call instructions, both of which need safe points.  As
   3920  // per normal, the call offsets are the code offsets at the end of the call
   3921  // instructions (the return points).
   3922  //
   3923  // `boundsCheckFailedLabel` is non-null iff a bounds check is required.
         // NOTE(review): no `boundsCheckFailedLabel` parameter exists on this
         // method any more — the line above looks stale; confirm and update.
   3924  // `nullCheckFailedLabel` is non-null only on platforms that can't fold the
   3925  // null check into the rest of the call instructions.
   3926  void wasmCallIndirect(const wasm::CallSiteDesc& desc,
   3927                        const wasm::CalleeDesc& callee,
   3928                        Label* nullCheckFailedLabel, CodeOffset* fastCallOffset,
   3929                        CodeOffset* slowCallOffset);
   3930 
   3931  // WasmTableCallIndexReg must contain the index of the indirect call.  This is
   3932  // for wasm calls only.
   3933  //
   3934  // `boundsCheckFailedLabel` is non-null iff a bounds check is required.
         // NOTE(review): no `boundsCheckFailedLabel` parameter exists on this
         // method any more — the line above looks stale; confirm and update.
   3935  // `nullCheckFailedLabel` is non-null only on platforms that can't fold the
   3936  // null check into the rest of the call instructions.
   3937  void wasmReturnCallIndirect(const wasm::CallSiteDesc& desc,
   3938                              const wasm::CalleeDesc& callee,
   3939                              Label* nullCheckFailedLabel,
   3940                              const ReturnCallAdjustmentInfo& retCallInfo);
   3941 
   3942  // This function takes care of loading the callee's instance and address from
   3943  // pinned reg.
   3944  void wasmCallRef(const wasm::CallSiteDesc& desc,
   3945                   const wasm::CalleeDesc& callee, CodeOffset* fastCallOffset,
   3946                   CodeOffset* slowCallOffset);
   3947 
   3948  void wasmReturnCallRef(const wasm::CallSiteDesc& desc,
   3949                         const wasm::CalleeDesc& callee,
   3950                         const ReturnCallAdjustmentInfo& retCallInfo);
   3951 
   3952  // WasmTableCallIndexReg must contain the index of the indirect call.
   3953  // This is for asm.js calls only.
   3954  CodeOffset asmCallIndirect(const wasm::CallSiteDesc& desc,
   3955                             const wasm::CalleeDesc& callee);
   3956 
   3957  // This function takes care of loading the pointer to the current instance
   3958  // as the implicit first argument. It preserves instance and pinned registers.
   3959  // (instance & pinned regs are non-volatile registers in the system ABI).
   3960  CodeOffset wasmCallBuiltinInstanceMethod(const wasm::CallSiteDesc& desc,
   3961                                           const ABIArg& instanceArg,
   3962                                           wasm::SymbolicAddress builtin,
   3963                                           wasm::FailureMode failureMode,
   3964                                           wasm::Trap failureTrap);
   3965 
   3966  // Performs the appropriate check based on the instance call's FailureMode,
   3967  // and traps if the check fails. The resultRegister should likely be
   3968  // ReturnReg, but this depends on whatever you do with registers immediately
   3969  // after the call.
   3970  void wasmTrapOnFailedInstanceCall(Register resultRegister,
   3971                                    wasm::FailureMode failureMode,
   3972                                    wasm::Trap failureTrap,
   3973                                    const wasm::TrapSiteDesc& trapSiteDesc);
   3974 
   3975  // Performs a bounds check for ranged wasm operations like memory.fill or
   3976  // array.fill. This handles the bizarre edge case in the wasm spec where a
   3977  // write to index N is valid as long as the length is zero - despite the index
   3978  // itself being out of bounds.
   3979  //
   3980  // `length` and `limit` will be unchanged.
   3981  void wasmBoundsCheckRange32(Register index, Register length, Register limit,
   3982                              Register tmp,
   3983                              const wasm::TrapSiteDesc& trapSiteDesc);
   3984 
   3985  // Returns information about which registers are necessary for a
   3986  // branchWasmRefIsSubtype call.
   3987  static BranchWasmRefIsSubtypeRegisters regsForBranchWasmRefIsSubtype(
   3988      wasm::RefType type);
   3989 
   3990  // Perform a subtype check that `ref` is a subtype of `destType`, branching to
   3991  // `label` depending on `onSuccess`.
   3992  //
   3993  // Will select one of the other branchWasmRefIsSubtype* functions depending on
   3994  // destType. See each function for the register allocation requirements, as
   3995  // well as which registers will be preserved.
   3996  //
   3997  // If this function returns a valid FaultingCodeOffset, then you must emit a
   3998  // trap site to catch the bad cast. It will never return a valid
   3999  // FaultingCodeOffset when signalNullChecks is false.
   4000  FaultingCodeOffset branchWasmRefIsSubtype(
   4001      Register ref, wasm::MaybeRefType sourceType, wasm::RefType destType,
   4002      Label* label, bool onSuccess, bool signalNullChecks, Register superSTV,
   4003      Register scratch1, Register scratch2);
   4004 
   4005  // Perform a subtype check that `ref` is a subtype of `destType`, branching to
   4006  // `label` depending on `onSuccess`. `destType` must be in the `any`
         // hierarchy.
   4007  //
   4008  // `superSTV` is required iff the destination type is a concrete
   4009  // type. `scratch1` is required iff the destination type is eq or lower and
   4010  // not none. `scratch2` is required iff the destination type is a concrete
   4011  // type and its `subTypingDepth` is >= wasm::MinSuperTypeVectorLength. See
   4012  // regsForBranchWasmRefIsSubtype.
   4013  //
   4014  // `ref` and `superSTV` are preserved. Scratch registers are
   4015  // clobbered.
   4016  //
   4017  // If this function returns a valid FaultingCodeOffset, then you must emit a
   4018  // trap site to catch the bad cast. It will never return a valid
   4019  // FaultingCodeOffset when signalNullChecks is false.
   4020  FaultingCodeOffset branchWasmRefIsSubtypeAny(
   4021      Register ref, wasm::RefType sourceType, wasm::RefType destType,
   4022      Label* label, bool onSuccess, bool signalNullChecks, Register superSTV,
   4023      Register scratch1, Register scratch2);
   4024 
   4025  // Perform a subtype check that `ref` is a subtype of `destType`, branching to
   4026  // `label` depending on `onSuccess`. `destType` must be in the `func`
         // hierarchy.
   4027  //
   4028  // `superSTV` and `scratch1` are required iff the destination type
   4029  // is a concrete type (not func and not nofunc). `scratch2` is required iff
   4030  // the destination type is a concrete type and its `subTypingDepth` is >=
   4031  // wasm::MinSuperTypeVectorLength. See regsForBranchWasmRefIsSubtype.
   4032  //
   4033  // `ref` and `superSTV` are preserved. Scratch registers are
   4034  // clobbered.
   4035  void branchWasmRefIsSubtypeFunc(Register ref, wasm::RefType sourceType,
   4036                                  wasm::RefType destType, Label* label,
   4037                                  bool onSuccess, Register superSTV,
   4038                                  Register scratch1, Register scratch2);
   4039 
   4040  // Perform a subtype check that `ref` is a subtype of `destType`, branching to
   4041  // `label` depending on `onSuccess`. `destType` must be in the `extern`
         // hierarchy.
   4042  void branchWasmRefIsSubtypeExtern(Register ref, wasm::RefType sourceType,
   4043                                    wasm::RefType destType, Label* label,
   4044                                    bool onSuccess);
   4045 
   4046  // Perform a subtype check that `ref` is a subtype of `destType`, branching to
   4047  // `label` depending on `onSuccess`. `destType` must be in the `exn`
         // hierarchy.
   4048  void branchWasmRefIsSubtypeExn(Register ref, wasm::RefType sourceType,
   4049                                 wasm::RefType destType, Label* label,
   4050                                 bool onSuccess);
   4051 
   4052  // Perform a subtype check that `subSTV` is a subtype of `superSTV`, branching
   4053  // to `label` depending on `onSuccess`. This method is a specialization of the
   4054  // general `wasm::TypeDef::isSubTypeOf` method for the case where the
   4055  // `superSTV` is statically known, which is the case for all wasm
   4056  // instructions.
   4057  //
   4058  // `scratch` is required iff the destination type is not final and the
   4059  // `superDepth` is >= wasm::MinSuperTypeVectorLength. `subSTV` is clobbered by
   4060  // this method if the destination type is not final. `superSTV` is always
   4061  // preserved.
   4062  void branchWasmSTVIsSubtype(Register subSTV, Register superSTV,
   4063                              Register scratch, const wasm::TypeDef* destType,
   4064                              Label* label, bool onSuccess);
   4065 
   4066  // Same as branchWasmSTVIsSubtype, but looks up a dynamic position in the
   4067  // super type vector.
   4068  //
   4069  // `scratch` is always required. `subSTV` and `superDepth` are clobbered.
   4070  // `superSTV` is preserved.
   4071  void branchWasmSTVIsSubtypeDynamicDepth(Register subSTV, Register superSTV,
   4072                                          Register superDepth, Register scratch,
   4073                                          Label* label, bool onSuccess);
   4074 
   4075  // Extract the tag of wasm anyref `src`.
   4076  void extractWasmAnyRefTag(Register src, Register dest);
   4077 
   4078  // Remove the known tag of wasm anyref `src`.
   4079  void untagWasmAnyRef(Register src, Register dest, wasm::AnyRefTag tag);
   4080 
   4081  // Branch if the wasm anyref `src` is or is not the null value.
   4082  void branchWasmAnyRefIsNull(bool isNull, Register src, Label* label);
   4083  // Branch if the wasm anyref `src` is or is not an I31.
   4084  void branchWasmAnyRefIsI31(bool isI31, Register src, Label* label);
   4085  // Branch if the wasm anyref `src` is or is not a JSObject* or null.
   4086  void branchWasmAnyRefIsObjectOrNull(bool isObject, Register src,
   4087                                      Label* label);
   4088  // Branch if the wasm anyref `src` is or is not a JSString.
   4089  void branchWasmAnyRefIsJSString(bool isJSString, Register src, Register temp,
   4090                                  Label* label);
   4091  // Branch if the wasm anyref `src` is or is not a GC thing.
   4092  void branchWasmAnyRefIsGCThing(bool isGCThing, Register src, Label* label);
   4093  // Branch if the wasm anyref `src` is or is not pointing to a nursery cell.
   4094  void branchWasmAnyRefIsNurseryCell(bool isNurseryCell, Register src,
   4095                                     Register scratch, Label* label);
   4096 
   4097  // Create a wasm i31ref by truncating the 32-bit integer.
   4098  void truncate32ToWasmI31Ref(Register src, Register dest);
   4099  // Convert a wasm i31ref to a signed 32-bit integer.
   4100  void convertWasmI31RefTo32Signed(Register src, Register dest);
   4101  // Convert a wasm i31ref to an unsigned 32-bit integer.
   4102  void convertWasmI31RefTo32Unsigned(Register src, Register dest);
   4103 
   4104  // Branch if the JS value `src` would need to be boxed out of line to be
   4105  // converted to a wasm anyref.
   4106  void branchValueConvertsToWasmAnyRefInline(ValueOperand src,
   4107                                             Register scratchInt,
   4108                                             FloatRegister scratchFloat,
   4109                                             Label* label);
   4110  // Convert a JS value to a wasm anyref. If the value requires boxing, this
   4111  // will branch to `oolConvert`.
   4112  void convertValueToWasmAnyRef(ValueOperand src, Register dest,
   4113                                FloatRegister scratchFloat, Label* oolConvert);
   4114  // Convert a JS object to a wasm anyref. This cannot fail.
   4115  void convertObjectToWasmAnyRef(Register src, Register dest);
   4116  // Convert a JS string to a wasm anyref. This cannot fail.
   4117  void convertStringToWasmAnyRef(Register src, Register dest);
   4118 
   4119  // Convert a wasm anyref to a JS value. This cannot fail.
   4120  //
   4121  // Due to spectre mitigations, these methods may clobber src.
   4122  void convertWasmAnyRefToValue(Register instance, Register src,
   4123                                ValueOperand dst, Register scratch);
   4124  void convertWasmAnyRefToValue(Register instance, Register src,
   4125                                const Address& dst, Register scratch);
   4126 
   4127  // Branch if the object `src` is or is not a WasmGcObject.
   4128  FaultingCodeOffset branchObjectIsWasmGcObject(bool isGcObject, Register src,
   4129                                                Register scratch, Label* label);
   4130 
   4131  // `typeDefData` will be preserved. `instance` and `result` may be the same
   4132  // register, in which case `instance` will be clobbered.
         // NOTE(review): "typeDefData" here and below presumably refers to the
         // type-definition data located at `offsetOfTypeDefData` within the
         // instance — confirm against callers.
   4133  void wasmNewStructObject(Register instance, Register result,
   4134                           Register allocSite, Register temp1,
   4135                           size_t offsetOfTypeDefData, Label* fail,
   4136                           gc::AllocKind allocKind, bool zeroFields);
   4137  // Allocates a wasm array with a dynamic number of elements.
   4138  //
   4139  // `numElements` and `typeDefData` will be preserved. `instance` and `result`
   4140  // may be the same register, in which case `instance` will be clobbered.
   4141  void wasmNewArrayObject(Register instance, Register result,
   4142                          Register numElements, Register allocSite,
   4143                          Register temp, size_t offsetOfTypeDefData,
   4144                          Label* fail, uint32_t elemSize, bool zeroFields);
   4145  // Allocates a wasm array with a fixed number of elements.
   4146  //
   4147  // `typeDefData` will be preserved. `instance` and `result` may be the same
   4148  // register, in which case `instance` will be clobbered.
   4149  void wasmNewArrayObjectFixed(Register instance, Register result,
   4150                               Register allocSite, Register temp1,
   4151                               Register temp2, size_t offsetOfTypeDefData,
   4152                               Label* fail, uint32_t numElements,
   4153                               uint32_t storageBytes, bool zeroFields);
   4154 
   4155  // This function handles nursery allocations for wasm. For JS, see
   4156  // MacroAssembler::bumpPointerAllocate.
   4157  //
   4158  // `typeDefData` will be preserved. `instance` and `result` may be the same
   4159  // register, in which case `instance` will be clobbered.
   4160  //
   4161  // See also the dynamically-sized version,
   4162  // MacroAssembler::wasmBumpPointerAllocateDynamic.
   4163  void wasmBumpPointerAllocate(Register instance, Register result,
   4164                               Register allocSite, Register temp1, Label* fail,
   4165                               uint32_t size);
   4166  // This function handles nursery allocations for wasm of dynamic size. For
   4167  // fixed-size allocations, see MacroAssembler::wasmBumpPointerAllocate.
   4168  //
   4169  // `typeDefData` and `size` will be preserved. `instance` and `result` may be
   4170  // the same register, in which case `instance` will be clobbered.
   4171  void wasmBumpPointerAllocateDynamic(Register instance, Register result,
   4172                                      Register allocSite, Register size,
   4173                                      Register temp1, Label* fail);
   4174 
   4175  // Compute ptr += (indexTemp32 << shift) where shift can be any value < 32.
   4176  // May destroy indexTemp32.  The value of indexTemp32 must be positive, and it
   4177  // is implementation-defined what happens if bits are lost or the value
   4178  // becomes negative through the shift.  On 64-bit systems, the high 32 bits of
   4179  // indexTemp32 must be zero, not garbage.
   4180  void shiftIndex32AndAdd(Register indexTemp32, int shift,
   4181                          Register pointer) PER_SHARED_ARCH;
   4182 
   4183  // The System ABI frequently states that the high bits of a 64-bit register
   4184  // that holds a 32-bit return value are unpredictable, and C++ compilers will
   4185  // indeed generate code that leaves garbage in the upper bits.
   4186  //
   4187  // Adjust the contents of the 64-bit register `r` to conform to our internal
   4188  // convention, which requires predictable high bits.  In practice, this means
   4189  // that the 32-bit value will be zero-extended or sign-extended to 64 bits as
   4190  // appropriate for the platform.
   4191  void widenInt32(Register r) DEFINED_ON(arm64, x64, mips64, loong64, riscv64);
   4192 
   4193  // As enterFakeExitFrame(), but using register conventions appropriate for
   4194  // wasm stubs.
   4195  void enterFakeExitFrameForWasm(Register cxreg, Register scratch,
   4196                                 ExitFrameType type) PER_SHARED_ARCH;
   4197 
   4198 public:
   4199  // ========================================================================
   4200  // Barrier functions.
   4201 
         // Fast path of the GC pre-write barrier for a value of the given
         // MIRType.  NOTE(review): presumably branches to `noBarrier` when the
         // barrier can be skipped — confirm against Ion callers.
   4202  void emitPreBarrierFastPath(MIRType type, Register temp1, Register temp2,
   4203                              Register temp3, Label* noBarrier);
   4204  void emitValueReadBarrierFastPath(ValueOperand value, Register cell,
   4205                                    Register temp1, Register temp2,
   4206                                    Register temp3, Register temp4,
   4207                                    Label* barrier);
   4208 
   4209 private:
         // Helper for the barrier fast paths above: loads the mark-bit state for
         // `cell` and the given color.  NOTE(review): details inferred from
         // parameter names — confirm.
   4210  void loadMarkBits(Register cell, Register chunk, Register markWord,
   4211                    Register bitIndex, Register temp, gc::ColorBit color);
   4212 
   4213 public:
   4214  // ========================================================================
   4215  // Clamping functions.
   4216 
         // Clamps the int32 in `reg` to the uint8 range [0, 255], in place.
   4217  inline void clampIntToUint8(Register reg) PER_SHARED_ARCH;
   4218 
   4219 public:
   4220  // ========================================================================
   4221  // Primitive atomic operations.
   4222  //
   4223  // If the access is from JS and the eventual destination of the result is a
   4224  // js::Value, it's probably best to use the JS-specific versions of these,
   4225  // see further below.
   4226  //
   4227  // Temp registers must be defined unless otherwise noted in the per-function
   4228  // constraints.
   4229 
   4230  // 8-bit, 16-bit, and 32-bit wide operations.
   4231  //
   4232  // The 8-bit and 16-bit operations zero-extend or sign-extend the result to
   4233  // 32 bits, according to `type`. On 64-bit systems, the upper 32 bits of the
   4234  // result will be zero on some platforms (eg, on x64) and will be the sign
   4235  // extension of the lower bits on other platforms (eg, MIPS).
   4236 
   4237  // CompareExchange with memory.  Return the value that was in memory,
   4238  // whether we wrote or not.
   4239  //
   4240  // x86-shared: `output` must be eax.
   4241  // MIPS64/LOONG64/RISCV64: `valueTemp`, `offsetTemp` and `maskTemp` must be
   4242  // defined for 8-bit and 16-bit wide operations.
   4243 
   4244  void compareExchange(Scalar::Type type, Synchronization sync,
   4245                       const Address& mem, Register expected,
   4246                       Register replacement, Register output)
   4247      DEFINED_ON(arm, arm64, x86_shared);
   4248 
   4249  void compareExchange(Scalar::Type type, Synchronization sync,
   4250                       const BaseIndex& mem, Register expected,
   4251                       Register replacement, Register output)
   4252      DEFINED_ON(arm, arm64, x86_shared);
   4253 
   4254  void compareExchange(Scalar::Type type, Synchronization sync,
   4255                       const Address& mem, Register expected,
   4256                       Register replacement, Register valueTemp,
   4257                       Register offsetTemp, Register maskTemp, Register output)
   4258      DEFINED_ON(mips64, loong64, riscv64);
   4259 
   4260  void compareExchange(Scalar::Type type, Synchronization sync,
   4261                       const BaseIndex& mem, Register expected,
   4262                       Register replacement, Register valueTemp,
   4263                       Register offsetTemp, Register maskTemp, Register output)
   4264      DEFINED_ON(mips64, loong64, riscv64);
   4265 
   4266  // x86: `expected` and `output` must be edx:eax; `replacement` is ecx:ebx.
   4267  // x64: `output` must be rax.
   4268  // ARM: Registers must be distinct; `replacement` and `output` must be
   4269  // (even,odd) pairs.
   4270 
   4271  void compareExchange64(Synchronization sync, const Address& mem,
   4272                         Register64 expected, Register64 replacement,
   4273                         Register64 output)
   4274      DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
   4275 
   4276  void compareExchange64(Synchronization sync, const BaseIndex& mem,
   4277                         Register64 expected, Register64 replacement,
   4278                         Register64 output)
   4279      DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
   4280 
  // Exchange with memory.  Return the value initially in memory.
  // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
  // and 16-bit wide operations.

  void atomicExchange(Scalar::Type type, Synchronization sync,
                      const Address& mem, Register value, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void atomicExchange(Scalar::Type type, Synchronization sync,
                      const BaseIndex& mem, Register value, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void atomicExchange(Scalar::Type type, Synchronization sync,
                      const Address& mem, Register value, Register valueTemp,
                      Register offsetTemp, Register maskTemp, Register output)
      DEFINED_ON(mips64, loong64, riscv64);

  void atomicExchange(Scalar::Type type, Synchronization sync,
                      const BaseIndex& mem, Register value, Register valueTemp,
                      Register offsetTemp, Register maskTemp, Register output)
      DEFINED_ON(mips64, loong64, riscv64);

  // 64-bit exchange with memory.  Return the value initially in memory.
  //
  // x86: `value` must be ecx:ebx; `output` must be edx:eax.
  // ARM: `value` and `output` must be distinct and (even,odd) pairs.
  // ARM64: `value` and `output` must be distinct.

  void atomicExchange64(Synchronization sync, const Address& mem,
                        Register64 value, Register64 output)
      DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);

  void atomicExchange64(Synchronization sync, const BaseIndex& mem,
                        Register64 value, Register64 output)
      DEFINED_ON(arm, arm64, x64, x86, mips64, loong64, riscv64);
   4314 
  // Read-modify-write with memory.  Return the value in memory before the
  // operation.
  //
  // x86-shared:
  //   For 8-bit operations, `value` and `output` must have a byte subregister.
  //   For Add and Sub, `temp` must be invalid.
  //   For And, Or, and Xor, `output` must be eax and `temp` must have a byte
  //   subregister.
  //
  // ARM: Registers `value` and `output` must differ.
  // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
  // and 16-bit wide operations; `value` and `output` must differ.

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Register value, const Address& mem, Register temp,
                     Register output) DEFINED_ON(arm, arm64, x86_shared);

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Imm32 value, const Address& mem, Register temp,
                     Register output) DEFINED_ON(x86_shared);

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Register value, const BaseIndex& mem, Register temp,
                     Register output) DEFINED_ON(arm, arm64, x86_shared);

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Imm32 value, const BaseIndex& mem, Register temp,
                     Register output) DEFINED_ON(x86_shared);

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Register value, const Address& mem, Register valueTemp,
                     Register offsetTemp, Register maskTemp, Register output)
      DEFINED_ON(mips64, loong64, riscv64);

  void atomicFetchOp(Scalar::Type type, Synchronization sync, AtomicOp op,
                     Register value, const BaseIndex& mem, Register valueTemp,
                     Register offsetTemp, Register maskTemp, Register output)
      DEFINED_ON(mips64, loong64, riscv64);

  // 64-bit read-modify-write.  Return the value in memory before the
  // operation.
  //
  // x86:
  //   `temp` must be ecx:ebx; `output` must be edx:eax.
  // x64:
  //   For Add and Sub, `temp` is ignored.
  //   For And, Or, and Xor, `output` must be rax.
  // ARM:
  //   `temp` and `output` must be (even,odd) pairs and distinct from `value`.
  // ARM64:
  //   Registers `value`, `temp`, and `output` must all differ.

  void atomicFetchOp64(Synchronization sync, AtomicOp op, Register64 value,
                       const Address& mem, Register64 temp, Register64 output)
      DEFINED_ON(arm, arm64, x64, mips64, loong64, riscv64);

  // x86-only overloads take the 64-bit operand from memory (`value` is an
  // Address) -- presumably to relieve x86's 32-bit register pressure.
  void atomicFetchOp64(Synchronization sync, AtomicOp op, const Address& value,
                       const Address& mem, Register64 temp, Register64 output)
      DEFINED_ON(x86);

  void atomicFetchOp64(Synchronization sync, AtomicOp op, Register64 value,
                       const BaseIndex& mem, Register64 temp, Register64 output)
      DEFINED_ON(arm, arm64, x64, mips64, loong64, riscv64);

  void atomicFetchOp64(Synchronization sync, AtomicOp op, const Address& value,
                       const BaseIndex& mem, Register64 temp, Register64 output)
      DEFINED_ON(x86);

  // 64-bit read-modify-write with no result returned.
  //
  // x64:
  //   `value` can be any register.
  // ARM:
  //   `temp` must be an (even,odd) pair and distinct from `value`.
  // ARM64:
  //   Registers `value` and `temp` must differ.

  void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
                        const Address& mem) DEFINED_ON(x64);

  void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
                        const Address& mem, Register64 temp)
      DEFINED_ON(arm, arm64, mips64, loong64, riscv64);

  void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
                        const BaseIndex& mem) DEFINED_ON(x64);

  void atomicEffectOp64(Synchronization sync, AtomicOp op, Register64 value,
                        const BaseIndex& mem, Register64 temp)
      DEFINED_ON(arm, arm64, mips64, loong64, riscv64);
   4400 
  // 64-bit atomic load. On 64-bit systems, use regular load with
  // Synchronization::Load, not this method.
  //
  // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
  // ARM: `output` must be (even,odd) pair.

  void atomicLoad64(Synchronization sync, const Address& mem, Register64 temp,
                    Register64 output) DEFINED_ON(x86);

  void atomicLoad64(Synchronization sync, const BaseIndex& mem, Register64 temp,
                    Register64 output) DEFINED_ON(x86);

  // ARM overloads: no temp register is needed.
  void atomicLoad64(Synchronization sync, const Address& mem, Register64 output)
      DEFINED_ON(arm);

  void atomicLoad64(Synchronization sync, const BaseIndex& mem,
                    Register64 output) DEFINED_ON(arm);

  // 64-bit atomic store. On 64-bit systems, use regular store with
  // Synchronization::Store, not this method.
  //
  // x86: `value` must be ecx:ebx; `temp` must be edx:eax.
  // ARM: `value` and `temp` must be (even,odd) pairs.

  void atomicStore64(Synchronization sync, const Address& mem, Register64 value,
                     Register64 temp) DEFINED_ON(x86, arm);

  void atomicStore64(Synchronization sync, const BaseIndex& mem,
                     Register64 value, Register64 temp) DEFINED_ON(x86, arm);
   4430 
  // ========================================================================
  // Wasm atomic operations.
  //
  // As the primitive operations above, but taking a wasm::MemoryAccessDesc
  // that describes the access (type, alignment, trap metadata -- see
  // wasm::MemoryAccessDesc).
  //
  // Constraints, when omitted, are exactly as for the primitive operations
  // above.

  void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                           const Address& mem, Register expected,
                           Register replacement, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                           const BaseIndex& mem, Register expected,
                           Register replacement, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                           const Address& mem, Register expected,
                           Register replacement, Register valueTemp,
                           Register offsetTemp, Register maskTemp,
                           Register output)
      DEFINED_ON(mips64, loong64, riscv64);

  void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                           const BaseIndex& mem, Register expected,
                           Register replacement, Register valueTemp,
                           Register offsetTemp, Register maskTemp,
                           Register output)
      DEFINED_ON(mips64, loong64, riscv64);

  void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                          const Address& mem, Register value, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                          const BaseIndex& mem, Register value, Register output)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                          const Address& mem, Register value,
                          Register valueTemp, Register offsetTemp,
                          Register maskTemp, Register output)
      DEFINED_ON(mips64, loong64, riscv64);

  void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                          const BaseIndex& mem, Register value,
                          Register valueTemp, Register offsetTemp,
                          Register maskTemp, Register output)
      DEFINED_ON(mips64, loong64, riscv64);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Register value, const Address& mem, Register temp,
                         Register output) DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Imm32 value, const Address& mem, Register temp,
                         Register output) DEFINED_ON(x86_shared);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Register value, const BaseIndex& mem, Register temp,
                         Register output) DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Imm32 value, const BaseIndex& mem, Register temp,
                         Register output) DEFINED_ON(x86_shared);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Register value, const Address& mem, Register valueTemp,
                         Register offsetTemp, Register maskTemp,
                         Register output) DEFINED_ON(mips64, loong64, riscv64);

  void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                         Register value, const BaseIndex& mem,
                         Register valueTemp, Register offsetTemp,
                         Register maskTemp, Register output)
      DEFINED_ON(mips64, loong64, riscv64);
   4507 
  // Read-modify-write with memory.  Return no value.
  //
  // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
  // and 16-bit wide operations.

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Register value, const Address& mem, Register temp)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Imm32 value, const Address& mem, Register temp)
      DEFINED_ON(x86_shared);

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Register value, const BaseIndex& mem, Register temp)
      DEFINED_ON(arm, arm64, x86_shared);

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Imm32 value, const BaseIndex& mem, Register temp)
      DEFINED_ON(x86_shared);

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Register value, const Address& mem,
                          Register valueTemp, Register offsetTemp,
                          Register maskTemp)
      DEFINED_ON(mips64, loong64, riscv64);

  void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
                          Register value, const BaseIndex& mem,
                          Register valueTemp, Register offsetTemp,
                          Register maskTemp)
      DEFINED_ON(mips64, loong64, riscv64);
   4540 
  // 64-bit wide operations.

  // 64-bit atomic load.  On 64-bit systems, use regular wasm load with
  // Synchronization::Load, not this method.
  //
  // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
  // ARM: `temp` should be invalid; `output` must be (even,odd) pair.

  void wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
                        const Address& mem, Register64 temp, Register64 output)
      DEFINED_ON(arm, x86, wasm32);

  void wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
                        const BaseIndex& mem, Register64 temp,
                        Register64 output) DEFINED_ON(arm, x86, wasm32);

  // 64-bit compare-exchange.  Return the value that was in memory.
  //
  // x86: `expected` must be the same as `output`, and must be edx:eax.
  // x86: `replacement` must be ecx:ebx.
  // x64: `output` must be rax.
  // ARM: Registers must be distinct; `replacement` and `output` must be
  // (even,odd) pairs.
  // ARM64: The base register in `mem` must not overlap `output`.
  // MIPS: Registers must be distinct.

  void wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                             const Address& mem, Register64 expected,
                             Register64 replacement,
                             Register64 output) PER_ARCH;

  void wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                             const BaseIndex& mem, Register64 expected,
                             Register64 replacement,
                             Register64 output) PER_ARCH;

  // 64-bit exchange.
  //
  // x86: `value` must be ecx:ebx; `output` must be edx:eax.
  // ARM: Registers must be distinct; `value` and `output` must be (even,odd)
  // pairs.
  // MIPS: Registers must be distinct.

  void wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                            const Address& mem, Register64 value,
                            Register64 output) PER_ARCH;

  void wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                            const BaseIndex& mem, Register64 value,
                            Register64 output) PER_ARCH;

  // 64-bit read-modify-write.
  //
  // x86: `output` must be edx:eax, `temp` must be ecx:ebx.
  // x64: For And, Or, and Xor `output` must be rax.
  // ARM: Registers must be distinct; `temp` and `output` must be (even,odd)
  // pairs.
  // MIPS: Registers must be distinct.

  void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                           Register64 value, const Address& mem,
                           Register64 temp, Register64 output)
      DEFINED_ON(arm, arm64, mips64, loong64, riscv64, x64);

  void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                           Register64 value, const BaseIndex& mem,
                           Register64 temp, Register64 output)
      DEFINED_ON(arm, arm64, mips64, loong64, riscv64, x64);

  // x86-only overloads take the 64-bit operand from memory (`value` is an
  // Address), as for atomicFetchOp64 above.
  void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                           const Address& value, const Address& mem,
                           Register64 temp, Register64 output) DEFINED_ON(x86);

  void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                           const Address& value, const BaseIndex& mem,
                           Register64 temp, Register64 output) DEFINED_ON(x86);

  // 64-bit read-modify-write with no result.  Here `value` can be any
  // register.

  void wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                            Register64 value, const BaseIndex& mem)
      DEFINED_ON(x64);

  void wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
                            Register64 value, const BaseIndex& mem,
                            Register64 temp) DEFINED_ON(arm64);
   4621 
  // ========================================================================
  // JS atomic operations.
  //
  // Here the arrayType must be a type that is valid for JS.  As of 2017 that
  // is an 8-bit, 16-bit, or 32-bit integer type.
  //
  // If arrayType is Scalar::Uint32 then:
  //
  //   - `output` must be a float register
  //   - if the operation takes one temp register then `temp` must be defined
  //   - if the operation takes two temp registers then `temp2` must be defined.
  //
  // (NOTE(review): presumably the float `output` is needed because a uint32
  // result may not fit in an int32 js::Value -- confirm against callers.)
  //
  // Otherwise `output` must be a GPR and `temp`/`temp2` should be InvalidReg.
  // (`temp1` must always be valid.)
  //
  // For additional register constraints, see the primitive 32-bit operations
  // and/or wasm operations above.

  void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
                         const Address& mem, Register expected,
                         Register replacement, Register temp,
                         AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);

  void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
                         const BaseIndex& mem, Register expected,
                         Register replacement, Register temp,
                         AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);

  void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
                         const Address& mem, Register expected,
                         Register replacement, Register valueTemp,
                         Register offsetTemp, Register maskTemp, Register temp,
                         AnyRegister output)
      DEFINED_ON(mips64, loong64, riscv64);

  void compareExchangeJS(Scalar::Type arrayType, Synchronization sync,
                         const BaseIndex& mem, Register expected,
                         Register replacement, Register valueTemp,
                         Register offsetTemp, Register maskTemp, Register temp,
                         AnyRegister output)
      DEFINED_ON(mips64, loong64, riscv64);

  void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
                        const Address& mem, Register value, Register temp,
                        AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);

  void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
                        const BaseIndex& mem, Register value, Register temp,
                        AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);

  void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
                        const Address& mem, Register value, Register valueTemp,
                        Register offsetTemp, Register maskTemp, Register temp,
                        AnyRegister output)
      DEFINED_ON(mips64, loong64, riscv64);

  void atomicExchangeJS(Scalar::Type arrayType, Synchronization sync,
                        const BaseIndex& mem, Register value,
                        Register valueTemp, Register offsetTemp,
                        Register maskTemp, Register temp, AnyRegister output)
      DEFINED_ON(mips64, loong64, riscv64);
   4683 
  // JS read-modify-write; returns the value in memory before the operation.
  // Register constraints are as for the section comment above.
  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Register value, const Address& mem,
                       Register temp1, Register temp2, AnyRegister output)
      DEFINED_ON(arm, arm64, x86_shared);

  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Register value, const BaseIndex& mem,
                       Register temp1, Register temp2, AnyRegister output)
      DEFINED_ON(arm, arm64, x86_shared);

  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Imm32 value, const Address& mem,
                       Register temp1, Register temp2, AnyRegister output)
      DEFINED_ON(x86_shared);

  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Imm32 value, const BaseIndex& mem,
                       Register temp1, Register temp2, AnyRegister output)
      DEFINED_ON(x86_shared);

  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Register value, const Address& mem,
                       Register valueTemp, Register offsetTemp,
                       Register maskTemp, Register temp, AnyRegister output)
      DEFINED_ON(mips64, loong64, riscv64);

  void atomicFetchOpJS(Scalar::Type arrayType, Synchronization sync,
                       AtomicOp op, Register value, const BaseIndex& mem,
                       Register valueTemp, Register offsetTemp,
                       Register maskTemp, Register temp, AnyRegister output)
      DEFINED_ON(mips64, loong64, riscv64);

  // JS read-modify-write with no result returned.
  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Register value, const Address& mem,
                        Register temp) DEFINED_ON(arm, arm64, x86_shared);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Register value, const BaseIndex& mem,
                        Register temp) DEFINED_ON(arm, arm64, x86_shared);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Imm32 value, const Address& mem,
                        Register temp) DEFINED_ON(x86_shared);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Imm32 value, const BaseIndex& mem,
                        Register temp) DEFINED_ON(x86_shared);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Register value, const Address& mem,
                        Register valueTemp, Register offsetTemp,
                        Register maskTemp) DEFINED_ON(mips64, loong64, riscv64);

  void atomicEffectOpJS(Scalar::Type arrayType, Synchronization sync,
                        AtomicOp op, Register value, const BaseIndex& mem,
                        Register valueTemp, Register offsetTemp,
                        Register maskTemp) DEFINED_ON(mips64, loong64, riscv64);

  // Computes the JS Atomics.isLockFree result for the byte size in `value`
  // into `output` (per the name -- see the implementation for the exact
  // boxing).
  void atomicIsLockFreeJS(Register value, Register output);

  // Emits a CPU spin-wait hint (presumably eg PAUSE/YIELD -- see the
  // per-architecture implementations).
  void atomicPause() PER_SHARED_ARCH;
   4745 
  // ========================================================================
  // Spectre Mitigations.
  //
  // Spectre attacks are side-channel attacks based on cache pollution or
  // slow-execution of some instructions. We have multiple spectre mitigations
  // possible:
  //
  //   - Stop speculative executions, with memory barriers. Memory barriers
  //     force all branches depending on loads to be resolved, and thus
  //     resolve all mis-speculated paths.
  //
  //   - Use conditional move instructions. Some CPUs have a branch predictor,
  //     and not a flag predictor. In such cases, using a conditional move
  //     instruction to zero some pointer/index is enough to add a
  //     data-dependency which prevents any further executions until the load
  //     is resolved.

  // Conditionally zeroes `index` into `output` when it is not a valid index
  // for `length` (see the cmov note above); 32-bit and pointer-width forms.
  void spectreMaskIndex32(Register index, Register length, Register output);
  void spectreMaskIndex32(Register index, const Address& length,
                          Register output);
  void spectreMaskIndexPtr(Register index, Register length, Register output);
  void spectreMaskIndexPtr(Register index, const Address& length,
                           Register output);

  // The length must be a power of two. Performs a bounds check and Spectre
  // index masking.
  void boundsCheck32PowerOfTwo(Register index, uint32_t length, Label* failure);

  // Emits a full speculation barrier; the instruction used is
  // platform-specific (see the per-architecture implementations).
  void speculationBarrier() PER_SHARED_ARCH;
   4775 
  //}}} check_macroassembler_decl_style
 public:
  // Unsafe here means the caller is responsible for Spectre mitigations if
  // needed. Prefer branchTestObjClass or one of the other masm helpers!
  inline void loadObjClassUnsafe(Register obj, Register dest);
  inline void loadObjShapeUnsafe(Register obj, Register dest);

  // Stores `shape` into the object's shape; the caller-supplied
  // `emitPreBarrier` callback is invoked to emit the GC pre-write barrier
  // (see the implementation for the exact ordering).
  template <typename EmitPreBarrier>
  inline void storeObjShape(Register shape, Register obj,
                            EmitPreBarrier emitPreBarrier);
  template <typename EmitPreBarrier>
  inline void storeObjShape(Shape* shape, Register obj,
                            EmitPreBarrier emitPreBarrier);

  inline void loadObjProto(Register obj, Register dest);

  inline void loadStringLength(Register str, Register dest);

  // Loads a pointer to `str`'s character storage into `dest`; the variants
  // below handle only the inline or only the non-inline (heap) storage case.
  void loadStringChars(Register str, Register dest, CharEncoding encoding);

  void loadNonInlineStringChars(Register str, Register dest,
                                CharEncoding encoding);
  void loadNonInlineStringCharsForStore(Register str, Register dest);
  void storeNonInlineStringChars(Register chars, Register str);

  void loadInlineStringChars(Register str, Register dest,
                             CharEncoding encoding);
  void loadInlineStringCharsForStore(Register str, Register dest);
   4804 
 private:
  // Selects whether the string-char helpers below operate on a single code
  // unit (CharCode) or on a full code point (CodePoint); the CodePoint
  // variants take an extra scratch register and must deal with surrogate
  // pairs split across rope children.
  enum class CharKind { CharCode, CodePoint };

  void branchIfMaybeSplitSurrogatePair(Register leftChild, Register index,
                                       Register scratch, Label* maybeSplit,
                                       Label* notSplit);

  // Loads the rope child of `str` containing `index` into `output`; branches
  // to `isLinear` when `str` is not a rope, and to `splitSurrogate` for a
  // possible surrogate pair split across the children.
  void loadRopeChild(CharKind kind, Register str, Register index,
                     Register output, Register maybeScratch, Label* isLinear,
                     Label* splitSurrogate);

  // Branches to `label` when the char at `index` can (resp. cannot) be loaded
  // without falling back to a slow path.
  void branchIfCanLoadStringChar(CharKind kind, Register str, Register index,
                                 Register scratch, Register maybeScratch,
                                 Label* label);
  void branchIfNotCanLoadStringChar(CharKind kind, Register str, Register index,
                                    Register scratch, Register maybeScratch,
                                    Label* label);

  void loadStringChar(CharKind kind, Register str, Register index,
                      Register output, Register scratch1, Register scratch2,
                      Label* fail);
   4826 
   4827 public:
  // Branch to |label| if the char code at |str[index]| can be loaded. Forwards
  // to the private CharKind::CharCode helper; no second scratch register is
  // needed for char codes, so InvalidReg is passed.
  void branchIfCanLoadStringChar(Register str, Register index, Register scratch,
                                 Label* label) {
    branchIfCanLoadStringChar(CharKind::CharCode, str, index, scratch,
                              InvalidReg, label);
  }
  // Inverted form: branch to |label| if the char code at |str[index]| can NOT
  // be loaded.
  void branchIfNotCanLoadStringChar(Register str, Register index,
                                    Register scratch, Label* label) {
    branchIfNotCanLoadStringChar(CharKind::CharCode, str, index, scratch,
                                 InvalidReg, label);
  }
   4838 
  // Branch to |label| if the code point at |str[index]| can be loaded.
  // Forwards to the private CharKind::CodePoint helper; code points require a
  // second scratch register, unlike the char-code variants above.
  void branchIfCanLoadStringCodePoint(Register str, Register index,
                                      Register scratch1, Register scratch2,
                                      Label* label) {
    branchIfCanLoadStringChar(CharKind::CodePoint, str, index, scratch1,
                              scratch2, label);
  }
  // Inverted form: branch to |label| if the code point at |str[index]| can
  // NOT be loaded.
  void branchIfNotCanLoadStringCodePoint(Register str, Register index,
                                         Register scratch1, Register scratch2,
                                         Label* label) {
    branchIfNotCanLoadStringChar(CharKind::CodePoint, str, index, scratch1,
                                 scratch2, label);
  }
   4851 
  // Load the char code at |str[index]| into |output|; jumps to |fail| when the
  // private helper cannot load it. Forwards with CharKind::CharCode.
  void loadStringChar(Register str, Register index, Register output,
                      Register scratch1, Register scratch2, Label* fail) {
    loadStringChar(CharKind::CharCode, str, index, output, scratch1, scratch2,
                   fail);
  }

  // Constant-index variant of the above.
  void loadStringChar(Register str, int32_t index, Register output,
                      Register scratch1, Register scratch2, Label* fail);

  // Load the code point at |str[index]| into |output|; jumps to |fail| when
  // the private helper cannot load it. Forwards with CharKind::CodePoint.
  void loadStringCodePoint(Register str, Register index, Register output,
                           Register scratch1, Register scratch2, Label* fail) {
    loadStringChar(CharKind::CodePoint, str, index, output, scratch1, scratch2,
                   fail);
  }
   4866 
  // Accessors for the two children of a rope string.
  void loadRopeLeftChild(Register str, Register dest);
  void loadRopeRightChild(Register str, Register dest);
  void storeRopeChildren(Register left, Register right, Register str);

  // Accessors for the base string of a dependent string.
  void loadDependentStringBase(Register str, Register dest);
  void storeDependentStringBase(Register base, Register str);

  // Load the index value of |str| into |dest|; jumps to |fail| otherwise.
  // NOTE(review): exact failure condition inferred from the |fail| label —
  // confirm against the out-of-line definition.
  void loadStringIndexValue(Register str, Register dest, Label* fail);
   4875 
   4876  /**
   4877   * Store the character in |src| to |dest|.
   4878   */
   4879  template <typename T>
   4880  void storeChar(const T& src, Address dest, CharEncoding encoding) {
   4881    if (encoding == CharEncoding::Latin1) {
   4882      store8(src, dest);
   4883    } else {
   4884      store16(src, dest);
   4885    }
   4886  }
   4887 
   4888  /**
   4889   * Load the character at |src| into |dest|.
   4890   */
   4891  template <typename T>
   4892  void loadChar(const T& src, Register dest, CharEncoding encoding) {
   4893    if (encoding == CharEncoding::Latin1) {
   4894      load8ZeroExtend(src, dest);
   4895    } else {
   4896      load16ZeroExtend(src, dest);
   4897    }
   4898  }
   4899 
   4900  /**
   4901   * Load the character at |chars[index + offset]| into |dest|. The optional
   4902   * offset argument is not scaled to the character encoding.
   4903   */
   4904  void loadChar(Register chars, Register index, Register dest,
   4905                CharEncoding encoding, int32_t offset = 0);
   4906 
   4907  /**
   4908   * Add |index| to |chars| so that |chars| now points at |chars[index]|.
   4909   */
   4910  void addToCharPtr(Register chars, Register index, CharEncoding encoding);
   4911 
   4912  /**
   4913   * Branch if |src| is not a lead surrogate character.
   4914   */
   4915  void branchIfNotLeadSurrogate(Register src, Label* label);
   4916 
   4917 private:
   4918  enum class SurrogateChar { Lead, Trail };
   4919  void branchSurrogate(Assembler::Condition cond, Register src,
   4920                       Register scratch, Label* label,
   4921                       SurrogateChar surrogateChar);
   4922 
   4923 public:
   4924  /**
   4925   * Branch if |src| is a lead surrogate character.
   4926   */
   4927  void branchIfLeadSurrogate(Register src, Register scratch, Label* label) {
   4928    branchSurrogate(Assembler::Equal, src, scratch, label, SurrogateChar::Lead);
   4929  }
   4930 
   4931  /**
   4932   * Branch if |src| is not a lead surrogate character.
   4933   */
   4934  void branchIfNotLeadSurrogate(Register src, Register scratch, Label* label) {
   4935    branchSurrogate(Assembler::NotEqual, src, scratch, label,
   4936                    SurrogateChar::Lead);
   4937  }
   4938 
   4939  /**
   4940   * Branch if |src| is not a trail surrogate character.
   4941   */
   4942  void branchIfNotTrailSurrogate(Register src, Register scratch, Label* label) {
   4943    branchSurrogate(Assembler::NotEqual, src, scratch, label,
   4944                    SurrogateChar::Trail);
   4945  }
   4946 
   4947 private:
   4948  void loadStringFromUnit(Register unit, Register dest,
   4949                          const StaticStrings& staticStrings);
   4950  void loadLengthTwoString(Register c1, Register c2, Register dest,
   4951                           const StaticStrings& staticStrings);
   4952 
   4953 public:
   4954  /**
   4955   * Lookup the length-one string from the static strings cache.
   4956   */
   4957  void lookupStaticString(Register ch, Register dest,
   4958                          const StaticStrings& staticStrings);
   4959 
   4960  /**
   4961   * Lookup the length-one string from the static strings cache. Jumps to |fail|
   4962   * when the string wasn't found in the strings cache.
   4963   */
   4964  void lookupStaticString(Register ch, Register dest,
   4965                          const StaticStrings& staticStrings, Label* fail);
   4966 
   4967  /**
   4968   * Lookup the length-two string from the static strings cache. Jumps to |fail|
   4969   * when the string wasn't found in the strings cache.
   4970   *
   4971   * Clobbers |ch1| and |ch2|.
   4972   */
   4973  void lookupStaticString(Register ch1, Register ch2, Register dest,
   4974                          const StaticStrings& staticStrings, Label* fail);
   4975 
  /**
   * Lookup the integer string from the static integer strings cache. Jumps to
   * |fail| when the string wasn't found in the strings cache.
   */
  void lookupStaticIntString(Register integer, Register dest, Register scratch,
                             const StaticStrings& staticStrings, Label* fail);
  // Convenience overload that reuses |dest| as the scratch register.
  void lookupStaticIntString(Register integer, Register dest,
                             const StaticStrings& staticStrings, Label* fail) {
    lookupStaticIntString(integer, dest, dest, staticStrings, fail);
  }
   4986 
   4987  /**
   4988   * Load the string representation of |input| in base |base|. Jumps to |fail|
   4989   * when the string representation needs to be allocated dynamically.
   4990   */
   4991  void loadInt32ToStringWithBase(Register input, Register base, Register dest,
   4992                                 Register scratch1, Register scratch2,
   4993                                 const StaticStrings& staticStrings,
   4994                                 const LiveRegisterSet& volatileRegs,
   4995                                 bool lowerCase, Label* fail);
   4996  void loadInt32ToStringWithBase(Register input, int32_t base, Register dest,
   4997                                 Register scratch1, Register scratch2,
   4998                                 const StaticStrings& staticStrings,
   4999                                 bool lowerCase, Label* fail);
   5000 
   5001  /**
   5002   * Load the BigInt digits from |bigInt| into |digits|.
   5003   */
   5004  void loadBigIntDigits(Register bigInt, Register digits);
   5005 
   5006  /**
   5007   * Load the first [u]int64 value from |bigInt| into |dest|.
   5008   */
   5009  void loadBigInt64(Register bigInt, Register64 dest);
   5010 
   5011  /**
   5012   * Load the first digit from |bigInt| into |dest|.
   5013   *
   5014   * Note: A BigInt digit is a pointer-sized value storing an unsigned number.
   5015   */
   5016  void loadBigIntDigit(Register bigInt, Register dest);
   5017 
   5018  /**
   5019   * Load the first digit from |bigInt| into |dest|. Jumps to |fail| when there
   5020   * is more than one BigInt digit.
   5021   *
   5022   * Note: A BigInt digit is a pointer-sized value storing an unsigned number.
   5023   */
   5024  void loadBigIntDigit(Register bigInt, Register dest, Label* fail);
   5025 
   5026  /**
   5027   * Load the number stored in |bigInt| into |dest|. Jumps to |fail| when the
   5028   * number can't be saved into a single pointer-sized register.
   5029   */
   5030  void loadBigIntPtr(Register bigInt, Register dest, Label* fail);
   5031 
   5032  /**
   5033   * Initialize a BigInt from |val|. Clobbers |val| when |temp| is invalid and
   5034   * |type == Scalar::BigInt64|!
   5035   */
   5036  void initializeBigInt64(Scalar::Type type, Register bigInt, Register64 val,
   5037                          Register64 temp = Register64::Invalid());
   5038 
   5039  /**
   5040   * Initialize a BigInt from the signed, pointer-sized register |val|.
   5041   * Clobbers |val|!
   5042   */
   5043  void initializeBigIntPtr(Register bigInt, Register val);
   5044 
   5045  /**
   5046   * Copy a BigInt. Jumps to |fail| on allocation failure or when the BigInt
   5047   * digits need to be heap allocated.
   5048   */
   5049  void copyBigIntWithInlineDigits(Register src, Register dest, Register temp,
   5050                                  gc::Heap initialHeap, Label* fail);
   5051 
   5052  /**
   5053   * Compare a BigInt and an Int32 value. Falls through to the false case.
   5054   */
   5055  void compareBigIntAndInt32(JSOp op, Register bigInt, Register int32,
   5056                             Register scratch1, Register scratch2,
   5057                             Label* ifTrue, Label* ifFalse);
   5058 
   5059  /**
   5060   * Compare a BigInt and an Int32 value. Falls through to the false case.
   5061   */
   5062  void compareBigIntAndInt32(JSOp op, Register bigInt, Imm32 int32,
   5063                             Register scratch, Label* ifTrue, Label* ifFalse);
   5064 
   5065  /**
   5066   * Compare two BigInts for equality. Falls through if both BigInts are equal
   5067   * to each other.
   5068   *
   5069   * - When we jump to |notSameLength|, |temp1| holds the length of the right
   5070   *   operand.
   5071   * - When we jump to |notSameDigit|, |temp2| points to the current digit of
   5072   *   the left operand and |temp4| holds the current digit of the right
   5073   *   operand.
   5074   */
   5075  void equalBigInts(Register left, Register right, Register temp1,
   5076                    Register temp2, Register temp3, Register temp4,
   5077                    Label* notSameSign, Label* notSameLength,
   5078                    Label* notSameDigit);
   5079 
   5080  void loadJSContext(Register dest);
   5081 
   5082  void loadGlobalObjectData(Register dest);
   5083 
   5084  void loadRealmFuse(RealmFuses::FuseIndex index, Register dest);
   5085 
   5086  void loadRuntimeFuse(RuntimeFuses::FuseIndex index, Register dest);
   5087 
   5088  void guardRuntimeFuse(RuntimeFuses::FuseIndex index, Label* fail);
   5089 
   5090  void switchToRealm(Register realm);
   5091  void switchToRealm(const void* realm, Register scratch);
   5092  void switchToObjectRealm(Register obj, Register scratch);
   5093  void switchToBaselineFrameRealm(Register scratch);
   5094  void switchToWasmInstanceRealm(Register scratch1, Register scratch2);
   5095  void debugAssertContextRealm(const void* realm, Register scratch);
   5096 
   5097  void guardObjectHasSameRealm(Register obj, Register scratch, Label* fail);
   5098 
   5099  template <typename ValueType>
   5100  void storeLocalAllocSite(ValueType value, Register scratch);
   5101 
   5102  void loadBaselineCompileQueue(Register dest);
   5103 
   5104  void loadJitActivation(Register dest);
   5105 
   5106  void guardSpecificAtom(Register str, JSOffThreadAtom* atom, Register scratch,
   5107                         const LiveRegisterSet& volatileRegs, Label* fail);
   5108 
   5109  void guardStringToInt32(Register str, Register output, Register scratch,
   5110                          LiveRegisterSet volatileRegs, Label* fail);
   5111 
   5112  template <typename T>
   5113  void loadTypedOrValue(const T& src, TypedOrValueRegister dest) {
   5114    if (dest.hasValue()) {
   5115      loadValue(src, dest.valueReg());
   5116    } else {
   5117      loadUnboxedValue(src, dest.type(), dest.typedReg());
   5118    }
   5119  }
   5120 
   5121  template <typename T>
   5122  void storeTypedOrValue(TypedOrValueRegister src, const T& dest) {
   5123    if (src.hasValue()) {
   5124      storeValue(src.valueReg(), dest);
   5125    } else if (IsFloatingPointType(src.type())) {
   5126      FloatRegister reg = src.typedReg().fpu();
   5127      if (src.type() == MIRType::Float32) {
   5128        ScratchDoubleScope fpscratch(*this);
   5129        convertFloat32ToDouble(reg, fpscratch);
   5130        boxDouble(fpscratch, dest);
   5131      } else {
   5132        boxDouble(reg, dest);
   5133      }
   5134    } else {
   5135      storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
   5136    }
   5137  }
   5138 
   5139  template <typename T>
   5140  void storeConstantOrRegister(const ConstantOrRegister& src, const T& dest) {
   5141    if (src.constant()) {
   5142      storeValue(src.value(), dest);
   5143    } else {
   5144      storeTypedOrValue(src.reg(), dest);
   5145    }
   5146  }
   5147 
   5148  void storeCallPointerResult(Register reg) {
   5149    if (reg != ReturnReg) {
   5150      mov(ReturnReg, reg);
   5151    }
   5152  }
   5153 
   5154  inline void storeCallBoolResult(Register reg);
   5155  inline void storeCallInt32Result(Register reg);
   5156 
   5157  void storeCallFloatResult(FloatRegister reg) {
   5158    if (reg.isSingle()) {
   5159      if (reg != ReturnFloat32Reg) {
   5160        moveFloat32(ReturnFloat32Reg, reg);
   5161      }
   5162    } else {
   5163      if (reg != ReturnDoubleReg) {
   5164        moveDouble(ReturnDoubleReg, reg);
   5165      }
   5166    }
   5167  }
   5168 
   5169  inline void storeCallResultValue(AnyRegister dest, JSValueType type);
   5170 
  // Move the boxed JS call result from the JS return register(s) into |dest|.
  // Must only be called right after returning from a call, while ReturnReg is
  // not live (it may be used as scratch below).
  void storeCallResultValue(ValueOperand dest) {
#if defined(JS_NUNBOX32)
    // Reshuffle the two return registers used for a call result to store into
    // dest, using ReturnReg as a scratch register if necessary. This must
    // only be called after returning from a call, at a point when the
    // return register is not live. XXX would be better to allow wrappers
    // to store the return value to different places.
    if (dest.typeReg() == JSReturnReg_Data) {
      if (dest.payloadReg() == JSReturnReg_Type) {
        // dest uses both return registers, crossed: swap the two registers
        // via ReturnReg.
        mov(JSReturnReg_Type, ReturnReg);
        mov(JSReturnReg_Data, JSReturnReg_Type);
        mov(ReturnReg, JSReturnReg_Data);
      } else {
        // dest.typeReg() aliases JSReturnReg_Data, so the payload must be
        // moved out before the type overwrites it.
        mov(JSReturnReg_Data, dest.payloadReg());
        mov(JSReturnReg_Type, dest.typeReg());
      }
    } else {
      // Safe order: writing the type register cannot clobber the data
      // register here.
      mov(JSReturnReg_Type, dest.typeReg());
      mov(JSReturnReg_Data, dest.payloadReg());
    }
#elif defined(JS_PUNBOX64)
    if (dest.valueReg() != JSReturnReg) {
      mov(JSReturnReg, dest.valueReg());
    }
#else
#  error "Bad architecture"
#endif
  }
   5200 
   5201  inline void storeCallResultValue(TypedOrValueRegister dest);
   5202 
   5203 private:
   5204  TrampolinePtr preBarrierTrampoline(MIRType type);
   5205 
  // Call the pre-barrier trampoline for the value stored at |address|,
  // without checking whether incremental barriers are currently needed —
  // callers that want that check use guardedCallPreBarrier below.
  template <typename T>
  void unguardedCallPreBarrier(const T& address, MIRType type) {
    Label done;
    // Skip the barrier when no GC thing is involved: a Value that isn't a
    // GC thing, or a null Object/String pointer.
    if (type == MIRType::Value) {
      branchTestGCThing(Assembler::NotEqual, address, &done);
    } else if (type == MIRType::Object || type == MIRType::String) {
      branchPtr(Assembler::Equal, address, ImmWord(0), &done);
    }

    // The trampoline takes the address of the barriered slot in
    // PreBarrierReg; preserve the register's previous contents around the
    // call.
    Push(PreBarrierReg);
    computeEffectiveAddress(address, PreBarrierReg);

    TrampolinePtr preBarrier = preBarrierTrampoline(type);

    call(preBarrier);
    Pop(PreBarrierReg);
    // On arm64, SP may be < PSP now (that's OK).
    // eg testcase: tests/auto-regress/bug702915.js
    bind(&done);
  }
   5226 
   5227 public:
  // Like unguardedCallPreBarrier, but first tests whether incremental
  // barriers are currently needed and skips the barrier call entirely when
  // they aren't.
  template <typename T>
  void guardedCallPreBarrier(const T& address, MIRType type) {
    Label done;
    branchTestNeedsIncrementalBarrier(Assembler::Zero, &done);
    unguardedCallPreBarrier(address, type);
    bind(&done);
  }
   5235 
   5236  // Like guardedCallPreBarrier, but unlike guardedCallPreBarrier this can be
   5237  // called from runtime-wide trampolines because it loads cx->zone (instead of
   5238  // baking in the current Zone) if JitContext::realm is nullptr.
  template <typename T>
  void guardedCallPreBarrierAnyZone(const T& address, MIRType type,
                                    Register scratch) {
    Label done;
    // |scratch| is consumed by the AnyZone barrier test, which loads cx->zone
    // when JitContext::realm is nullptr (see the comment above this method).
    branchTestNeedsIncrementalBarrierAnyZone(Assembler::Zero, &done, scratch);
    unguardedCallPreBarrier(address, type);
    bind(&done);
  }
   5247 
   5248  enum class Uint32Mode { FailOnDouble, ForceDouble };
   5249 
   5250  void boxUint32(Register source, ValueOperand dest, Uint32Mode uint32Mode,
   5251                 Label* fail);
   5252 
  // Whether loading an element of |type| needs an out-of-line call instead of
  // inline code: only Float16, and only when the macro-assembler has no
  // native float32 <-> float16 conversion support.
  static bool LoadRequiresCall(Scalar::Type type) {
    return type == Scalar::Float16 && !MacroAssembler::SupportsFloat32To16();
  }

  // Store counterpart of LoadRequiresCall; the condition is identical.
  static bool StoreRequiresCall(Scalar::Type type) {
    return type == Scalar::Float16 && !MacroAssembler::SupportsFloat32To16();
  }
   5260 
   5261  template <typename T>
   5262  void loadFromTypedArray(Scalar::Type arrayType, const T& src,
   5263                          AnyRegister dest, Register temp1, Register temp2,
   5264                          Label* fail, LiveRegisterSet volatileLiveReg);
   5265 
   5266  void loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src,
   5267                          const ValueOperand& dest, Uint32Mode uint32Mode,
   5268                          Register temp, Label* fail,
   5269                          LiveRegisterSet volatileLiveReg);
   5270 
   5271  void loadFromTypedBigIntArray(Scalar::Type arrayType, const BaseIndex& src,
   5272                                const ValueOperand& dest, Register bigInt,
   5273                                Register64 temp);
   5274 
  // Store |value| to the integer typed-array element at |dest|, using the
  // store width implied by |arrayType|. Crashes on non-integer array types;
  // float and BigInt arrays have dedicated helpers below.
  template <typename S, typename T>
  void storeToTypedIntArray(Scalar::Type arrayType, const S& value,
                            const T& dest) {
    switch (arrayType) {
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
        store8(value, dest);
        break;
      case Scalar::Int16:
      case Scalar::Uint16:
        store16(value, dest);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
        store32(value, dest);
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
  }
   5296 
   5297  template <typename T>
   5298  void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
   5299                              const T& dest, Register temp,
   5300                              LiveRegisterSet volatileLiveRegs);
   5301 
  // Store the 64-bit |value| to the BigInt typed-array element at |dest|.
  template <typename S, typename T>
  void storeToTypedBigIntArray(const S& value, const T& dest) {
    store64(value, dest);
  }
   5306 
   5307  void memoryBarrierBefore(Synchronization sync);
   5308  void memoryBarrierAfter(Synchronization sync);
   5309 
   5310  using MacroAssemblerSpecific::convertDoubleToFloat16;
   5311  using MacroAssemblerSpecific::convertFloat32ToFloat16;
   5312  using MacroAssemblerSpecific::convertInt32ToFloat16;
   5313  using MacroAssemblerSpecific::loadFloat16;
   5314 
   5315  void convertDoubleToFloat16(FloatRegister src, FloatRegister dest,
   5316                              Register temp, LiveRegisterSet volatileLiveRegs);
   5317 
   5318  void convertDoubleToFloat16(FloatRegister src, FloatRegister dest,
   5319                              Register temp1, Register temp2);
   5320 
   5321  void convertFloat32ToFloat16(FloatRegister src, FloatRegister dest,
   5322                               Register temp, LiveRegisterSet volatileLiveRegs);
   5323 
   5324  void convertInt32ToFloat16(Register src, FloatRegister dest, Register temp,
   5325                             LiveRegisterSet volatileLiveRegs);
   5326 
   5327  template <typename T>
   5328  void loadFloat16(const T& src, FloatRegister dest, Register temp1,
   5329                   Register temp2, LiveRegisterSet volatileLiveRegs);
   5330 
   5331  template <typename T>
   5332  void storeFloat16(FloatRegister src, const T& dest, Register temp,
   5333                    LiveRegisterSet volatileLiveRegs);
   5334 
   5335  void moveFloat16ToGPR(FloatRegister src, Register dest,
   5336                        LiveRegisterSet volatileLiveRegs);
   5337 
   5338  void moveGPRToFloat16(Register src, FloatRegister dest, Register temp,
   5339                        LiveRegisterSet volatileLiveRegs);
   5340 
   5341  void debugAssertIsObject(const ValueOperand& val);
   5342  void debugAssertObjHasFixedSlots(Register obj, Register scratch);
   5343 
   5344  void debugAssertObjectHasClass(Register obj, Register scratch,
   5345                                 const JSClass* clasp);
   5346 
   5347  void debugAssertGCThingIsTenured(Register ptr, Register temp);
   5348 
   5349  void branchArrayIsNotPacked(Register array, Register temp1, Register temp2,
   5350                              Label* label);
   5351 
   5352  void setIsPackedArray(Register obj, Register output, Register temp);
   5353 
   5354  void packedArrayPop(Register array, ValueOperand output, Register temp1,
   5355                      Register temp2, Label* fail);
   5356  void packedArrayShift(Register array, ValueOperand output, Register temp1,
   5357                        Register temp2, LiveRegisterSet volatileRegs,
   5358                        Label* fail);
   5359 
   5360  void loadArgumentsObjectElement(Register obj, Register index,
   5361                                  ValueOperand output, Register temp,
   5362                                  Label* fail);
   5363  void loadArgumentsObjectElementHole(Register obj, Register index,
   5364                                      ValueOperand output, Register temp,
   5365                                      Label* fail);
   5366  void loadArgumentsObjectElementExists(Register obj, Register index,
   5367                                        Register output, Register temp,
   5368                                        Label* fail);
   5369 
   5370  void loadArgumentsObjectLength(Register obj, Register output, Label* fail);
   5371  void loadArgumentsObjectLength(Register obj, Register output);
   5372 
   5373  void branchTestArgumentsObjectFlags(Register obj, Register temp,
   5374                                      uint32_t flags, Condition cond,
   5375                                      Label* label);
   5376 
   5377  void typedArrayElementSize(Register obj, Register output);
   5378 
   5379 private:
   5380  // Shift |output| by the element shift of the ResizableTypedArray in |obj|.
   5381  void resizableTypedArrayElementShiftBy(Register obj, Register output,
   5382                                         Register scratch);
   5383 
   5384 public:
   5385  void branchIfClassIsNotTypedArray(Register clasp, Label* notTypedArray);
   5386  void branchIfClassIsNotNonResizableTypedArray(Register clasp,
   5387                                                Label* notTypedArray);
   5388  void branchIfClassIsNotResizableTypedArray(Register clasp,
   5389                                             Label* notTypedArray);
   5390 
   5391  void branchIfIsNotArrayBuffer(Register obj, Register temp, Label* label);
   5392  void branchIfIsNotSharedArrayBuffer(Register obj, Register temp,
   5393                                      Label* label);
   5394  void branchIfIsArrayBufferMaybeShared(Register obj, Register temp,
   5395                                        Label* label);
   5396 
   5397 private:
   5398  enum class BranchIfDetached { No, Yes };
   5399 
   5400  void branchIfHasDetachedArrayBuffer(BranchIfDetached branchIf, Register obj,
   5401                                      Register temp, Label* label);
   5402 
   5403 public:
   5404  void branchIfHasDetachedArrayBuffer(Register obj, Register temp,
   5405                                      Label* label) {
   5406    branchIfHasDetachedArrayBuffer(BranchIfDetached::Yes, obj, temp, label);
   5407  }
   5408 
   5409  void branchIfHasAttachedArrayBuffer(Register obj, Register temp,
   5410                                      Label* label) {
   5411    branchIfHasDetachedArrayBuffer(BranchIfDetached::No, obj, temp, label);
   5412  }
   5413 
   5414  void branchIfResizableArrayBufferViewOutOfBounds(Register obj, Register temp,
   5415                                                   Label* label);
   5416 
   5417  void branchIfResizableArrayBufferViewInBounds(Register obj, Register temp,
   5418                                                Label* label);
   5419 
   5420  void branchIfNativeIteratorNotReusable(Register ni, Label* notReusable);
   5421 
   5422  void maybeLoadIteratorFromShape(Register obj, Register dest, Register temp,
   5423                                  Register temp2, Register temp3,
   5424                                  Label* failure, bool exclusive);
   5425 
   5426  void iteratorMore(Register obj, ValueOperand output, Register temp);
   5427  void iteratorClose(Register obj, Register temp1, Register temp2,
   5428                     Register temp3);
   5429  void iteratorLength(Register obj, Register output);
   5430  void iteratorLoadElement(Register obj, Register index, Register output);
   5431  void iteratorLoadElement(Register obj, int32_t index, Register output);
   5432  void registerIterator(Register enumeratorsList, Register iter, Register temp);
   5433 
   5434  void prepareOOBStoreElement(Register object, Register index,
   5435                              Register elements, Register spectreTemp,
   5436                              Label* failure, LiveRegisterSet volatileLiveRegs);
   5437 
   5438  void toHashableNonGCThing(ValueOperand value, ValueOperand result,
   5439                            FloatRegister tempFloat);
   5440 
   5441  void toHashableValue(ValueOperand value, ValueOperand result,
   5442                       FloatRegister tempFloat, Label* atomizeString,
   5443                       Label* tagString);
   5444 
   5445 private:
   5446  void scrambleHashCode(Register result);
   5447 
   5448 public:
   5449  void hashAndScrambleValue(ValueOperand value, Register result, Register temp);
   5450  void prepareHashNonGCThing(ValueOperand value, Register result,
   5451                             Register temp);
   5452  void prepareHashString(Register str, Register result, Register temp);
   5453  void prepareHashSymbol(Register sym, Register result);
   5454  void prepareHashBigInt(Register bigInt, Register result, Register temp1,
   5455                         Register temp2, Register temp3);
   5456  void prepareHashObject(Register setObj, ValueOperand value, Register result,
   5457                         Register temp1, Register temp2, Register temp3,
   5458                         Register temp4);
   5459  void prepareHashValue(Register setObj, ValueOperand value, Register result,
   5460                        Register temp1, Register temp2, Register temp3,
   5461                        Register temp4);
   5462 
   5463  // Helper functions used to implement mozilla::HashTable lookup inline
   5464  // in jitcode.
   5465  void prepareHashMFBT(Register hashCode, bool alreadyScrambled);
   5466  template <typename Table>
   5467  void computeHash1MFBT(Register hashTable, Register hashCode, Register hash1,
   5468                        Register scratch);
   5469  template <typename Table>
   5470  void computeHash2MFBT(Register hashTable, Register hashCode, Register hash2,
   5471                        Register sizeMask, Register scratch);
   5472  void applyDoubleHashMFBT(Register hash1, Register hash2, Register sizeMask);
  // Emit one probe step for an inlined mozilla::detail::HashTable lookup:
  // examines the entry selected by |hashIndex| and branches to |missing| or
  // |collision| when the entry cannot match |hashCode|. Helper for lookupMFBT
  // below.
  template <typename Table>
  void checkForMatchMFBT(Register hashTable, Register hashIndex,
                         Register hashCode, Register scratch, Register scratch2,
                         Label* missing, Label* collision);

 public:
  // This generates an inlined version of mozilla::detail::HashTable::lookup
  // (ForNonAdd).
  // Inputs/requirements:
  // - hashTable: A register containing a pointer to a Table. The Table type
  //              must define:
  //              - Table::Entry
  //              - Table::offsetOfHashShift()
  //              - Table::offsetOfTable()
  // - hashCode:  The 32-bit hash of the key to look up. This should already
  //              have been scrambled using prepareHashMFBT.
  // - match:     A lambda to generate code to compare keys. The code that it
  //              generates can assume that `scratch` contains the address of
  //              a Table::Entry with a matching hash value. `scratch2` can be
  //              safely used without clobbering anything. If the keys don't
  //              match, the generated code should fall through. If the keys
  //              match, the generated code is responsible for jumping to the
  //              correct continuation.
  // - missing:   A label to jump to if the key does not exist in the table.
  template <typename Table, typename Match>
  void lookupMFBT(Register hashTable, Register hashCode, Register scratch,
                  Register scratch2, Register scratch3, Register scratch4,
                  Register scratch5, Label* missing, Match match);

 private:
  // Whether the value being looked up in a Map/Set may be a BigInt. BigInt
  // comparison needs extra temp registers, so callers that know the answer
  // statically can avoid reserving them (see the public wrappers below).
  enum class IsBigInt { No, Yes, Maybe };

  /**
   * Search for a value in a MapObject or SetObject.
   *
   * When we jump to |found|, |entryTemp| holds the found hashtable entry.
   */
  template <typename TableObject>
  void orderedHashTableLookup(Register setOrMapObj, ValueOperand value,
                              Register hash, Register entryTemp, Register temp1,
                              Register temp3, Register temp4, Register temp5,
                              Label* found, IsBigInt isBigInt);

  // Shared implementation behind the setObjectHas* public wrappers.
  void setObjectHas(Register setObj, ValueOperand value, Register hash,
                    Register result, Register temp1, Register temp2,
                    Register temp3, Register temp4, IsBigInt isBigInt);

  // Shared implementation behind the mapObjectHas* public wrappers.
  void mapObjectHas(Register mapObj, ValueOperand value, Register hash,
                    Register result, Register temp1, Register temp2,
                    Register temp3, Register temp4, IsBigInt isBigInt);

  // Shared implementation behind the mapObjectGet* public wrappers.
  void mapObjectGet(Register mapObj, ValueOperand value, Register hash,
                    ValueOperand result, Register temp1, Register temp2,
                    Register temp3, Register temp4, Register temp5,
                    IsBigInt isBigInt);
   5528 
   5529 public:
   5530  void setObjectHasNonBigInt(Register setObj, ValueOperand value, Register hash,
   5531                             Register result, Register temp1, Register temp2) {
   5532    return setObjectHas(setObj, value, hash, result, temp1, temp2, InvalidReg,
   5533                        InvalidReg, IsBigInt::No);
   5534  }
   5535  void setObjectHasBigInt(Register setObj, ValueOperand value, Register hash,
   5536                          Register result, Register temp1, Register temp2,
   5537                          Register temp3, Register temp4) {
   5538    return setObjectHas(setObj, value, hash, result, temp1, temp2, temp3, temp4,
   5539                        IsBigInt::Yes);
   5540  }
   5541  void setObjectHasValue(Register setObj, ValueOperand value, Register hash,
   5542                         Register result, Register temp1, Register temp2,
   5543                         Register temp3, Register temp4) {
   5544    return setObjectHas(setObj, value, hash, result, temp1, temp2, temp3, temp4,
   5545                        IsBigInt::Maybe);
   5546  }
   5547 
   5548  void mapObjectHasNonBigInt(Register mapObj, ValueOperand value, Register hash,
   5549                             Register result, Register temp1, Register temp2) {
   5550    return mapObjectHas(mapObj, value, hash, result, temp1, temp2, InvalidReg,
   5551                        InvalidReg, IsBigInt::No);
   5552  }
   5553  void mapObjectHasBigInt(Register mapObj, ValueOperand value, Register hash,
   5554                          Register result, Register temp1, Register temp2,
   5555                          Register temp3, Register temp4) {
   5556    return mapObjectHas(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
   5557                        IsBigInt::Yes);
   5558  }
   5559  void mapObjectHasValue(Register mapObj, ValueOperand value, Register hash,
   5560                         Register result, Register temp1, Register temp2,
   5561                         Register temp3, Register temp4) {
   5562    return mapObjectHas(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
   5563                        IsBigInt::Maybe);
   5564  }
   5565 
   5566  void mapObjectGetNonBigInt(Register mapObj, ValueOperand value, Register hash,
   5567                             ValueOperand result, Register temp1,
   5568                             Register temp2, Register temp3) {
   5569    return mapObjectGet(mapObj, value, hash, result, temp1, temp2, temp3,
   5570                        InvalidReg, InvalidReg, IsBigInt::No);
   5571  }
   5572  void mapObjectGetBigInt(Register mapObj, ValueOperand value, Register hash,
   5573                          ValueOperand result, Register temp1, Register temp2,
   5574                          Register temp3, Register temp4, Register temp5) {
   5575    return mapObjectGet(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
   5576                        temp5, IsBigInt::Yes);
   5577  }
   5578  void mapObjectGetValue(Register mapObj, ValueOperand value, Register hash,
   5579                         ValueOperand result, Register temp1, Register temp2,
   5580                         Register temp3, Register temp4, Register temp5) {
   5581    return mapObjectGet(mapObj, value, hash, result, temp1, temp2, temp3, temp4,
   5582                        temp5, IsBigInt::Maybe);
   5583  }
   5584 
 private:
  // Load the entry count of a Map/Set object's ordered hash table into
  // |result|. Shared implementation for the two public size loads below.
  template <typename TableObject>
  void loadOrderedHashTableCount(Register setOrMapObj, Register result);

 public:
  // Load a SetObject's size into |result|.
  void loadSetObjectSize(Register setObj, Register result);
  // Load a MapObject's size into |result|.
  void loadMapObjectSize(Register mapObj, Register result);

  // Inline version of js::ClampDoubleToUint8.
  // This function clobbers the input register.
  void clampDoubleToUint8(FloatRegister input, Register output) PER_ARCH;

  // If source is a double, load into dest.
  // If source is int32, convert to double and store in dest.
  // Else, branch to failure.
  inline void ensureDouble(const ValueOperand& source, FloatRegister dest,
                           Label* failure);
   5602 
   5603  template <typename S>
   5604  void ensureDouble(const S& source, FloatRegister dest, Label* failure) {
   5605    Label isDouble, done;
   5606    branchTestDouble(Assembler::Equal, source, &isDouble);
   5607    branchTestInt32(Assembler::NotEqual, source, failure);
   5608 
   5609    convertInt32ToDouble(source, dest);
   5610    jump(&done);
   5611 
   5612    bind(&isDouble);
   5613    unboxDouble(source, dest);
   5614 
   5615    bind(&done);
   5616  }
   5617 
  // Inline allocation.
 private:
  // Branch to |fail| if inline allocation of |allocKind| is not currently
  // possible.
  void checkAllocatorState(Register temp, gc::AllocKind allocKind, Label* fail);
  // Whether things of |allocKind| requested in |initialHeap| may be
  // allocated in the nursery.
  bool shouldNurseryAllocate(gc::AllocKind allocKind, gc::Heap initialHeap);
  // Allocate an object (plus |nDynamicSlots| dynamic slots) from the
  // nursery; branches to |fail| on failure.
  void nurseryAllocateObject(
      Register result, Register temp, gc::AllocKind allocKind,
      size_t nDynamicSlots, Label* fail,
      const AllocSiteInput& allocSite = AllocSiteInput());
  // Bump-pointer allocation of |size| bytes in |zone|; branches to |fail|
  // on failure.
  void bumpPointerAllocate(Register result, Register temp, Label* fail,
                           CompileZone* zone, JS::TraceKind traceKind,
                           uint32_t size,
                           const AllocSiteInput& allocSite = AllocSiteInput());
  // Update the allocation |site| for a freshly allocated |result|.
  void updateAllocSite(Register temp, Register result, CompileZone* zone,
                       Register site);

  // Tenured allocation from the free lists; branches to |fail| on failure.
  void freeListAllocate(Register result, Register temp, gc::AllocKind allocKind,
                        Label* fail);
  void allocateObject(Register result, Register temp, gc::AllocKind allocKind,
                      uint32_t nDynamicSlots, gc::Heap initialHeap, Label* fail,
                      const AllocSiteInput& allocSite = AllocSiteInput());
  void nurseryAllocateString(Register result, Register temp,
                             gc::AllocKind allocKind, Label* fail);
  void allocateString(Register result, Register temp, gc::AllocKind allocKind,
                      gc::Heap initialHeap, Label* fail);
  void nurseryAllocateBigInt(Register result, Register temp, Label* fail);
  // Copy slots [start, end) from |templateObj| into |obj|.
  void copySlotsFromTemplate(Register obj,
                             const TemplateNativeObject& templateObj,
                             uint32_t start, uint32_t end);
  // Store |v| into slots [start, end) starting at |addr|.
  void fillSlotsWithConstantValue(Address addr, Register temp, uint32_t start,
                                  uint32_t end, const Value& v);
  void fillSlotsWithUndefined(Address addr, Register temp, uint32_t start,
                              uint32_t end);
  void fillSlotsWithUninitialized(Address addr, Register temp, uint32_t start,
                                  uint32_t end);

  // Initialize |obj|'s slots as described by |templateObj|.
  void initGCSlots(Register obj, Register temp,
                   const TemplateNativeObject& templateObj);
   5655 
 public:
  // Allocate and initialize an object described by |templateObj|; branches
  // to |fail| when inline allocation is not possible.
  void createGCObject(Register result, Register temp,
                      const TemplateObject& templateObj, gc::Heap initialHeap,
                      Label* fail, bool initContents = true,
                      const AllocSiteInput& allocSite = AllocSiteInput());

  // Allocate a plain object with the given |shape| and slot counts; branches
  // to |fail| when inline allocation is not possible.
  void createPlainGCObject(Register result, Register shape, Register temp,
                           Register temp2, uint32_t numFixedSlots,
                           uint32_t numDynamicSlots, gc::AllocKind allocKind,
                           gc::Heap initialHeap, Label* fail,
                           const AllocSiteInput& allocSite,
                           bool initContents = true);

  // dynamicSlotsTemp is used to initialize the dynamic slots after allocating
  // the object. If numUsedDynamicSlots == 0, it may be InvalidReg.
  void createArrayWithFixedElements(
      Register result, Register shape, Register temp, Register dynamicSlotsTemp,
      uint32_t arrayLength, uint32_t arrayCapacity,
      uint32_t numUsedDynamicSlots, uint32_t numDynamicSlots,
      gc::AllocKind allocKind, gc::Heap initialHeap, Label* fail,
      const AllocSiteInput& allocSite = AllocSiteInput());

  // Allocate a clone of the |canonical| function with environment chain
  // |envChain|; branches to |fail| when inline allocation is not possible.
  void createFunctionClone(Register result, Register canonical,
                           Register envChain, Register temp,
                           gc::AllocKind allocKind, Label* fail,
                           const AllocSiteInput& allocSite);

  // Initialize an already-allocated GC thing from |templateObj|.
  void initGCThing(Register obj, Register temp,
                   const TemplateObject& templateObj, bool initContents = true);

  void initTypedArraySlots(Register obj, Register length, Register temp1,
                           Register temp2, Label* fail,
                           const FixedLengthTypedArrayObject* templateObj);

  void initTypedArraySlotsInline(
      Register obj, Register temp,
      const FixedLengthTypedArrayObject* templateObj);

  void newGCString(Register result, Register temp, gc::Heap initialHeap,
                   Label* fail);
  void newGCFatInlineString(Register result, Register temp,
                            gc::Heap initialHeap, Label* fail);

  void newGCBigInt(Register result, Register temp, gc::Heap initialHeap,
                   Label* fail);

  void preserveWrapper(Register wrapper, Register temp1, Register temp2,
                       const LiveRegisterSet& liveRegs);
   5704 
 private:
  // Branch to |label| unless the characters at |stringChars| are equal to
  // |str|'s characters.
  void branchIfNotStringCharsEquals(Register stringChars,
                                    const JSOffThreadAtom* str, Label* label);

 public:
  // Returns true if |str| is a (non-empty) string which can be compared
  // using |compareStringChars|.
  static bool canCompareStringCharsInline(const JSOffThreadAtom* str);

  // Load the string characters in preparation for |compareStringChars|.
  void loadStringCharsForCompare(Register input, const JSOffThreadAtom* str,
                                 Register stringChars, Label* fail);

  // Compare string characters based on the equality operator. The string
  // characters must be at least as long as the length of |str|.
  void compareStringChars(JSOp op, Register stringChars,
                          const JSOffThreadAtom* str, Register result);

  // Compares two strings for equality based on the JSOP.
  // This checks for identical pointers, atoms and length and fails for
  // everything else.
  void compareStrings(JSOp op, Register left, Register right, Register result,
                      Label* fail);
   5728 
  // Result of the typeof operation. Falls back to slow-path for proxies.
  void typeOfObject(Register objReg, Register scratch, Label* slow,
                    Label* isObject, Label* isCallable, Label* isUndefined);

  // Implementation of IsCallable. Doesn't handle proxies.
  void isCallable(Register obj, Register output, Label* isProxy) {
    isCallableOrConstructor(/* isCallable = */ true, obj, output, isProxy);
  }
  // Implementation of IsConstructor. Doesn't handle proxies.
  void isConstructor(Register obj, Register output, Label* isProxy) {
    isCallableOrConstructor(/* isCallable = */ false, obj, output, isProxy);
  }
   5740 
  void setIsCrossRealmArrayConstructor(Register obj, Register output);

  void setIsDefinitelyTypedArrayConstructor(Register obj, Register output);

  // Megamorphic property-access cache helpers.
  void loadMegamorphicCache(Register dest);
  // Try to atomize string |str| without calling into the VM; branches to
  // |fail| when a fast atomization is not possible.
  void tryFastAtomize(Register str, Register scratch, Register output,
                      Label* fail);
  void loadMegamorphicSetPropCache(Register dest);

  // Extract a jsid and its hash from |value| when it is an atom or symbol;
  // branches to |cacheMiss| otherwise.
  void loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
                               Register outHash, Label* cacheMiss);

  void loadAtomHash(Register id, Register hash, Label* done);

  void emitExtractValueFromMegamorphicCacheEntry(
      Register obj, Register entry, Register scratch1, Register scratch2,
      ValueOperand output, Label* cacheHit, Label* cacheMiss,
      Label* cacheHitGetter);

  template <typename IdOperandType>
  void emitMegamorphicCacheLookupByValueCommon(
      IdOperandType id, Register obj, Register scratch1, Register scratch2,
      Register outEntryPtr, Label* cacheMiss, Label* cacheMissWithEntry);

  void emitMegamorphicCacheLookup(PropertyKey id, Register obj,
                                  Register scratch1, Register scratch2,
                                  Register outEntryPtr, ValueOperand output,
                                  Label* cacheHit,
                                  Label* cacheHitGetter = nullptr);

  // NOTE: |id| must either be a ValueOperand or a Register. If it is a
  // Register, we assume that it is an atom.
  template <typename IdOperandType>
  void emitMegamorphicCacheLookupByValue(IdOperandType id, Register obj,
                                         Register scratch1, Register scratch2,
                                         Register outEntryPtr,
                                         ValueOperand output, Label* cacheHit,
                                         Label* cacheHitGetter = nullptr);

  void emitMegamorphicCacheLookupExists(ValueOperand id, Register obj,
                                        Register scratch1, Register scratch2,
                                        Register outEntryPtr, Register output,
                                        Label* cacheHit, bool hasOwn);

  // Given a PropertyIteratorObject with valid indices, extract the current
  // PropertyIndex, storing the index in |outIndex| and the kind in |outKind|.
  void extractCurrentIndexAndKindFromIterator(Register iterator,
                                              Register outIndex,
                                              Register outKind);
  void extractIndexAndKindFromIteratorByIterIndex(Register iterator,
                                                  Register inOutIndex,
                                                  Register outKind,
                                                  Register scratch);
   5794 
  // Store |value| into a slot through the megamorphic SetProp cache. The x86
  // variant takes fewer scratch registers because of register scarcity on
  // that platform.
  template <typename IdType>
#ifdef JS_CODEGEN_X86
  // See MegamorphicSetElement in LIROps.yaml
  void emitMegamorphicCachedSetSlot(
      IdType id, Register obj, Register scratch1, ValueOperand value,
      const LiveRegisterSet& liveRegs, Label* cacheHit,
      void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));
#else
  void emitMegamorphicCachedSetSlot(
      IdType id, Register obj, Register scratch1, Register scratch2,
      Register scratch3, ValueOperand value, const LiveRegisterSet& liveRegs,
      Label* cacheHit,
      void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));
#endif
   5809 
  // Load a DOM expando value while guarding that the expando's generation
  // still equals |generation|; branches to |fail| otherwise.
  void loadDOMExpandoValueGuardGeneration(
      Register obj, ValueOperand output,
      JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
      Label* fail);

  void guardNonNegativeIntPtrToInt32(Register reg, Label* fail);

  // ArrayBuffer(View) byte-length / byte-offset / length accessors, loaded
  // as raw intptr-sized values.
  void loadArrayBufferByteLengthIntPtr(Register obj, Register output);
  void loadArrayBufferViewByteOffsetIntPtr(Register obj, Register output);
  void loadArrayBufferViewLengthIntPtr(Register obj, Register output);

  void loadGrowableSharedArrayBufferByteLengthIntPtr(Synchronization sync,
                                                     Register obj,
                                                     Register output);
   5824 
 private:
  // Discriminates the two kinds of views onto a resizable ArrayBuffer.
  enum class ResizableArrayBufferView { TypedArray, DataView };

  // Shared implementation for the two public loads below.
  void loadResizableArrayBufferViewLengthIntPtr(ResizableArrayBufferView view,
                                                Synchronization sync,
                                                Register obj, Register output,
                                                Register scratch);

 public:
  // Load the current length of a typed array backed by a resizable buffer.
  void loadResizableTypedArrayLengthIntPtr(Synchronization sync, Register obj,
                                           Register output, Register scratch) {
    loadResizableArrayBufferViewLengthIntPtr(
        ResizableArrayBufferView::TypedArray, sync, obj, output, scratch);
  }

  // Load the current byte length of a DataView backed by a resizable buffer.
  void loadResizableDataViewByteLengthIntPtr(Synchronization sync, Register obj,
                                             Register output,
                                             Register scratch) {
    loadResizableArrayBufferViewLengthIntPtr(ResizableArrayBufferView::DataView,
                                             sync, obj, output, scratch);
  }
   5846 
  void dateFillLocalTimeSlots(Register obj, Register scratch,
                              const LiveRegisterSet& volatileRegs);

 private:
  // dest = src / divisor, unsigned 32-bit, with a compile-time-constant
  // divisor.
  void udiv32ByConstant(Register src, uint32_t divisor, Register dest);

  // dest = src % divisor, unsigned 32-bit, with a compile-time-constant
  // divisor.
  void umod32ByConstant(Register src, uint32_t divisor, Register dest,
                        Register scratch);

  // Shared implementation for the dateHours/Minutes/SecondsFromSecondsIntoYear
  // wrappers below; |getTimeFn| selects the time component to compute.
  template <typename GetTimeFn>
  void dateTimeFromSecondsIntoYear(ValueOperand secondsIntoYear,
                                   ValueOperand output, Register scratch1,
                                   Register scratch2, GetTimeFn getTimeFn);

 public:
  void dateHoursFromSecondsIntoYear(ValueOperand secondsIntoYear,
                                    ValueOperand output, Register scratch1,
                                    Register scratch2);

  void dateMinutesFromSecondsIntoYear(ValueOperand secondsIntoYear,
                                      ValueOperand output, Register scratch1,
                                      Register scratch2);

  void dateSecondsFromSecondsIntoYear(ValueOperand secondsIntoYear,
                                      ValueOperand output, Register scratch1,
                                      Register scratch2);

  void computeImplicitThis(Register env, ValueOperand output, Label* slowPath);

 private:
  // Shared implementation for the isCallable/isConstructor wrappers above.
  void isCallableOrConstructor(bool isCallable, Register obj, Register output,
                               Label* isProxy);

 public:
  // Generates code used to complete a bailout.
  void generateBailoutTail(Register scratch, Register bailoutInfo);
   5883 
 public:
#ifndef JS_CODEGEN_ARM64
  // StackPointer manipulation functions.
  // On ARM64, the StackPointer is implemented as two synchronized registers.
  // Code shared across platforms must use these functions to be valid.
  //
  // These wrappers follow the (source, destination) operand order of the
  // underlying movePtr/loadPtr/storePtr helpers.
  template <typename T>
  inline void addToStackPtr(T t);
  template <typename T>
  inline void addStackPtrTo(T t);

  void subFromStackPtr(Imm32 imm32)
      DEFINED_ON(mips64, loong64, riscv64, wasm32, arm, x86, x64);
  void subFromStackPtr(Register reg);

  // t -= StackPointer.
  template <typename T>
  void subStackPtrFrom(T t) {
    subPtr(getStackPointer(), t);
  }

  // StackPointer &= t.
  template <typename T>
  void andToStackPtr(T t) {
    andPtr(t, getStackPointer());
  }

  // StackPointer = t.
  template <typename T>
  void moveToStackPtr(T t) {
    movePtr(t, getStackPointer());
  }
  // t = StackPointer.
  template <typename T>
  void moveStackPtrTo(T t) {
    movePtr(getStackPointer(), t);
  }

  // StackPointer = *t.
  template <typename T>
  void loadStackPtr(T t) {
    loadPtr(t, getStackPointer());
  }
  // *t = StackPointer.
  template <typename T>
  void storeStackPtr(T t) {
    storePtr(getStackPointer(), t);
  }

  template <typename T>
  void loadStackPtrFromPrivateValue(T t) {
    loadStackPtr(t);
  }
  template <typename T>
  void storeStackPtrToPrivateValue(T t) {
    storeStackPtr(t);
  }

  // StackPointer testing functions.
  // On ARM64, sp can function as the zero register depending on context.
  // Code shared across platforms must use these functions to be valid.
  template <typename T>
  inline void branchTestStackPtr(Condition cond, T t, Label* label);
  template <typename T>
  inline void branchStackPtr(Condition cond, T rhs, Label* label);
  template <typename T>
  inline void branchStackPtrRhs(Condition cond, T lhs, Label* label);

  // Move the stack pointer based on the requested amount.
  inline void reserveStack(uint32_t amount);
#else  // !JS_CODEGEN_ARM64
  void reserveStack(uint32_t amount);
#endif
   5950 
 public:
  // Turn on emission of profiler instrumentation around call sites.
  void enableProfilingInstrumentation() {
    emitProfilingInstrumentation_ = true;
  }

 private:
  // This class is used to surround call sites throughout the assembler. This
  // is used by callWithABI, and callJit functions, except if suffixed by
  // NoProfiler.
  class MOZ_RAII AutoProfilerCallInstrumentation {
   public:
    explicit AutoProfilerCallInstrumentation(MacroAssembler& masm);
    ~AutoProfilerCallInstrumentation() = default;
  };
  friend class AutoProfilerCallInstrumentation;

  // Record |label| for later patching by linkProfilerCallSites; append
  // failure is propagated as OOM.
  void appendProfilerCallSite(CodeOffset label) {
    propagateOOM(profilerCallSites_.append(label));
  }

  // Fix up the code pointers to be written for locations where profilerCallSite
  // emitted moves of RIP to a register.
  void linkProfilerCallSites(JitCode* code);

  // This field is used to manage profiling instrumentation output. If
  // provided and enabled, then instrumentation will be emitted around call
  // sites.
  bool emitProfilingInstrumentation_;

  // Record locations of the call sites.
  Vector<CodeOffset, 0, SystemAllocPolicy> profilerCallSites_;
   5982 
 public:
  void loadJitCodeRaw(Register func, Register dest);
  void loadJitCodeRawNoIon(Register func, Register dest, Register scratch);

  void loadBaselineFramePtr(Register framePtr, Register dest);

  // Load the baseline frame pointer and push it via lowercase push.
  // NOTE(review): the capital-P variant below presumably also updates frame
  // bookkeeping (Push vs. push) — confirm against the Push definition.
  void pushBaselineFramePtr(Register framePtr, Register scratch) {
    loadBaselineFramePtr(framePtr, scratch);
    push(scratch);
  }

  void PushBaselineFramePtr(Register framePtr, Register scratch) {
    loadBaselineFramePtr(framePtr, scratch);
    Push(scratch);
  }

  using MacroAssemblerSpecific::movePtr;

  // Materialize a trampoline's code address into |dest|.
  void movePtr(TrampolinePtr ptr, Register dest) {
    movePtr(ImmPtr(ptr.value), dest);
  }
   6004 
 private:
  void handleFailure();

 public:
  Label* exceptionLabel() {
    // Exceptions are currently handled the same way as sequential failures.
    return &failureLabel_;
  }

  // Label bound at the shared failure path.
  Label* failureLabel() { return &failureLabel_; }

  void finish();
  void link(JitCode* code);

  // Emit code that crashes with the given message if it is ever reached.
  void assumeUnreachable(const char* output);

  // Emit debugging printf-style output from generated code.
  void printf(const char* output);
  void printf(const char* output, Register value);

  // Slow path for double-to-int truncation, shared by JS and wasm callers.
  void outOfLineTruncateSlow(FloatRegister src, Register dest,
                             bool widenFloatToDouble, bool compilingWasm,
                             wasm::BytecodeOffset callOffset);

  void convertInt32ValueToDouble(ValueOperand val);
   6029 
 private:
  // Target precision for convertValueToFloatingPoint.
  enum class FloatingPointType { Double, Float32, Float16 };

  // Shared implementation for the convertValueToDouble/Float32/Float16
  // wrappers below; branches to |fail| for inputs it cannot convert inline.
  void convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
                                   Register maybeTemp,
                                   LiveRegisterSet volatileLiveRegs,
                                   Label* fail, FloatingPointType outputType);
   6037 
   6038 public:
   6039  void convertValueToDouble(ValueOperand value, FloatRegister output,
   6040                            Label* fail) {
   6041    convertValueToFloatingPoint(value, output, InvalidReg, LiveRegisterSet{},
   6042                                fail, FloatingPointType::Double);
   6043  }
   6044 
   6045  void convertValueToFloat32(ValueOperand value, FloatRegister output,
   6046                             Label* fail) {
   6047    convertValueToFloatingPoint(value, output, InvalidReg, LiveRegisterSet{},
   6048                                fail, FloatingPointType::Float32);
   6049  }
   6050 
   6051  void convertValueToFloat16(ValueOperand value, FloatRegister output,
   6052                             Register maybeTemp,
   6053                             LiveRegisterSet volatileLiveRegs, Label* fail) {
   6054    convertValueToFloatingPoint(value, output, maybeTemp, volatileLiveRegs,
   6055                                fail, FloatingPointType::Float16);
   6056  }
   6057 
   6058  //
   6059  // Functions for converting values to int.
   6060  //
   6061 
   6062  // This carries over the MToNumberInt32 operation on the ValueOperand
   6063  // input; see comment at the top of this class.
   6064  void convertValueToInt32(ValueOperand value, FloatRegister temp,
   6065                           Register output, Label* fail, bool negativeZeroCheck,
   6066                           IntConversionInputKind conversion);
   6067 
   6068  // This carries over the MTruncateToInt32 operation on the ValueOperand
   6069  // input; see the comment at the top of this class.
   6070  //
   6071  // Strings may be handled by providing labels to jump to. The subroutine,
   6072  // usually an OOL call, is passed the unboxed string in |stringReg| and should
   6073  // convert it to a double store into |temp|.
   6074  void truncateValueToInt32(ValueOperand value, Label* handleStringEntry,
   6075                            Label* handleStringRejoin,
   6076                            Label* truncateDoubleSlow, Register stringReg,
   6077                            FloatRegister temp, Register output, Label* fail);
   6078 
   6079  void truncateValueToInt32(ValueOperand value, FloatRegister temp,
   6080                            Register output, Label* fail) {
   6081    truncateValueToInt32(value, nullptr, nullptr, nullptr, InvalidReg, temp,
   6082                         output, fail);
   6083  }
   6084 
   6085  // Convenience functions for clamping values to uint8.
   6086  //
   6087  // Strings are handled by providing labels to jump to. The subroutine, usually
   6088  // an OOL call, is passed the unboxed string in |stringReg| and should convert
   6089  // it to a double store into |temp|.
   6090  void clampValueToUint8(ValueOperand value, Label* handleStringEntry,
   6091                         Label* handleStringRejoin, Register stringReg,
   6092                         FloatRegister temp, Register output, Label* fail);
   6093 
   6094  [[nodiscard]] bool icBuildOOLFakeExitFrame(void* fakeReturnAddr,
   6095                                             AutoSaveLiveRegisters& save);
   6096 
   6097  // Align the stack pointer based on the number of arguments which are pushed
   6098  // on the stack, such that the JitFrameLayout would be correctly aligned on
   6099  // the JitStackAlignment.
   6100  void alignJitStackBasedOnNArgs(Register nargs, bool countIncludesThis);
   6101  void alignJitStackBasedOnNArgs(uint32_t argc, bool countIncludesThis);
   6102 
   6103  inline void assertStackAlignment(uint32_t alignment, int32_t offset = 0);
   6104 
   6105  void touchFrameValues(Register numStackValues, Register scratch1,
   6106                        Register scratch2);
   6107 
   6108 #ifdef JS_64BIT
   6109  // See comment block "64-bit GPRs carrying 32-bit values" above.  This asserts
   6110  // that the high bits of the register are appropriate for the architecture and
   6111  // the value in the low bits.
   6112  void debugAssertCanonicalInt32(Register r);
   6113 #endif
   6114 
   6115 #ifdef FUZZING_JS_FUZZILLI
   6116  void fuzzilliHashDouble(FloatRegister src, Register result, Register temp);
   6117 
   6118  void fuzzilliStoreHash(Register value, Register temp1, Register temp2);
   6119 #endif
   6120 };
   6121 
// StackMacroAssembler checks no GC will happen while it's on the stack.
class MOZ_RAII StackMacroAssembler : public MacroAssembler {
  // RAII guard asserting that no GC occurs during this assembler's lifetime.
  JS::AutoCheckCannotGC nogc;

 public:
  StackMacroAssembler(JSContext* cx, TempAllocator& alloc);
};
   6129 
// WasmMacroAssembler does not contain GC pointers, so it doesn't need the no-GC
// checking StackMacroAssembler has.
class MOZ_RAII WasmMacroAssembler : public MacroAssembler {
 public:
  explicit WasmMacroAssembler(TempAllocator& alloc, bool limitedSize = true);
  // Verify on destruction that no GC things were embedded in the code.
  ~WasmMacroAssembler() { assertNoGCThings(); }
};
   6137 
// Heap-allocated MacroAssembler used for off-thread code generation.
// GC cancels off-thread compilations.
class OffThreadMacroAssembler : public MacroAssembler {
 public:
  OffThreadMacroAssembler(TempAllocator& alloc, CompileRealm* realm);
};
   6144 
//{{{ check_macroassembler_style
// Accessors for the bookkeeping counter of bytes pushed onto the frame.
inline uint32_t MacroAssembler::framePushed() const { return framePushed_; }

inline void MacroAssembler::setFramePushed(uint32_t framePushed) {
  framePushed_ = framePushed;
}
   6151 
// Adjust the tracked frame depth by |value| bytes (positive = push,
// negative = pop). Asserts the frame never underflows below zero.
inline void MacroAssembler::adjustFrame(int32_t value) {
  MOZ_ASSERT_IF(value < 0, framePushed_ >= uint32_t(-value));
  setFramePushed(framePushed_ + value);
}
   6156 
// Account for |bytes| popped by an instruction that adjusts the stack
// implicitly (e.g. a return), without emitting any code. The amount must be
// pointer-size aligned and fit in an int32_t for the signed adjustment.
inline void MacroAssembler::implicitPop(uint32_t bytes) {
  MOZ_ASSERT(bytes % sizeof(intptr_t) == 0);
  MOZ_ASSERT(bytes <= INT32_MAX);
  adjustFrame(-int32_t(bytes));
}
   6162 //}}} check_macroassembler_style
   6163 
   6164 static inline Assembler::DoubleCondition JSOpToDoubleCondition(JSOp op) {
   6165  switch (op) {
   6166    case JSOp::Eq:
   6167    case JSOp::StrictEq:
   6168      return Assembler::DoubleEqual;
   6169    case JSOp::Ne:
   6170    case JSOp::StrictNe:
   6171      return Assembler::DoubleNotEqualOrUnordered;
   6172    case JSOp::Lt:
   6173      return Assembler::DoubleLessThan;
   6174    case JSOp::Le:
   6175      return Assembler::DoubleLessThanOrEqual;
   6176    case JSOp::Gt:
   6177      return Assembler::DoubleGreaterThan;
   6178    case JSOp::Ge:
   6179      return Assembler::DoubleGreaterThanOrEqual;
   6180    default:
   6181      MOZ_CRASH("Unexpected comparison operation");
   6182  }
   6183 }
   6184 
   6185 // Note: the op may have been inverted during lowering (to put constants in a
   6186 // position where they can be immediates), so it is important to use the
   6187 // lir->jsop() instead of the mir->jsop() when it is present.
   6188 static inline Assembler::Condition JSOpToCondition(JSOp op, bool isSigned) {
   6189  if (isSigned) {
   6190    switch (op) {
   6191      case JSOp::Eq:
   6192      case JSOp::StrictEq:
   6193        return Assembler::Equal;
   6194      case JSOp::Ne:
   6195      case JSOp::StrictNe:
   6196        return Assembler::NotEqual;
   6197      case JSOp::Lt:
   6198        return Assembler::LessThan;
   6199      case JSOp::Le:
   6200        return Assembler::LessThanOrEqual;
   6201      case JSOp::Gt:
   6202        return Assembler::GreaterThan;
   6203      case JSOp::Ge:
   6204        return Assembler::GreaterThanOrEqual;
   6205      default:
   6206        MOZ_CRASH("Unrecognized comparison operation");
   6207    }
   6208  } else {
   6209    switch (op) {
   6210      case JSOp::Eq:
   6211      case JSOp::StrictEq:
   6212        return Assembler::Equal;
   6213      case JSOp::Ne:
   6214      case JSOp::StrictNe:
   6215        return Assembler::NotEqual;
   6216      case JSOp::Lt:
   6217        return Assembler::Below;
   6218      case JSOp::Le:
   6219        return Assembler::BelowOrEqual;
   6220      case JSOp::Gt:
   6221        return Assembler::Above;
   6222      case JSOp::Ge:
   6223        return Assembler::AboveOrEqual;
   6224      default:
   6225        MOZ_CRASH("Unrecognized comparison operation");
   6226    }
   6227  }
   6228 }
   6229 
   6230 static inline size_t StackDecrementForCall(uint32_t alignment,
   6231                                           size_t bytesAlreadyPushed,
   6232                                           size_t bytesToPush) {
   6233  return bytesToPush +
   6234         ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
   6235 }
   6236 
   6237 // Helper for generatePreBarrier.
   6238 inline DynFn JitPreWriteBarrier(MIRType type);
   6239 }  // namespace jit
   6240 
   6241 }  // namespace js
   6242 
   6243 #endif /* jit_MacroAssembler_h */