tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Assembler-shared.h (28245B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef jit_shared_Assembler_shared_h
      8 #define jit_shared_Assembler_shared_h
      9 
     10 #if JS_BITS_PER_WORD == 32
     11 #  include "mozilla/CheckedInt.h"
     12 #endif
     13 #include "mozilla/DebugOnly.h"
     14 
     15 #include <limits.h>
     16 #include <utility>  // std::pair
     17 
     18 #include "gc/Barrier.h"
     19 #include "jit/AtomicOp.h"
     20 #include "jit/JitAllocPolicy.h"
     21 #include "jit/JitCode.h"
     22 #include "jit/JitContext.h"
     23 #include "jit/Label.h"
     24 #include "jit/Registers.h"
     25 #include "jit/RegisterSets.h"
     26 #include "js/ScalarType.h"  // js::Scalar::Type
     27 #include "vm/HelperThreads.h"
     28 #include "wasm/WasmCodegenTypes.h"
     29 #include "wasm/WasmConstants.h"
     30 
     31 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) ||      \
     32    defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64) || \
     33    defined(JS_CODEGEN_WASM32) || defined(JS_CODEGEN_RISCV64)
     34 // Push return addresses callee-side.
     35 #  define JS_USE_LINK_REGISTER
     36 #endif
     37 
     38 #if defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_ARM64) ||    \
     39    defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64) || \
     40    defined(JS_CODEGEN_ARM)
     41 // JS_CODELABEL_LINKMODE gives labels additional metadata
     42 // describing how Bind() should patch them.
     43 #  define JS_CODELABEL_LINKMODE
     44 #endif
     45 
     46 using js::wasm::FaultingCodeOffset;
     47 
     48 namespace js {
     49 namespace jit {
     50 
     51 enum class FrameType;
     52 enum class ExceptionResumeKind : int32_t;
     53 
     54 namespace Disassembler {
     55 class HeapAccess;
     56 }  // namespace Disassembler
     57 
// Size in bytes of a 128-bit SIMD value. The asserts below pin the lane
// layouts this constant must accommodate (int32x4, float32x4, float64x2).
static constexpr uint32_t Simd128DataSize = 4 * sizeof(int32_t);
static_assert(Simd128DataSize == 4 * sizeof(int32_t),
              "SIMD data should be able to contain int32x4");
static_assert(Simd128DataSize == 4 * sizeof(float),
              "SIMD data should be able to contain float32x4");
static_assert(Simd128DataSize == 2 * sizeof(double),
              "SIMD data should be able to contain float64x2");
     65 
// Multiplier applied to an index register in a scaled addressing mode,
// expressed as a left-shift amount: the factor is 1 << Scale.
enum Scale {
  TimesOne = 0,    // index * 1
  TimesTwo = 1,    // index * 2
  TimesFour = 2,   // index * 4
  TimesEight = 3,  // index * 8
  Invalid = -1     // sentinel; not a usable scale
};
     73 
// JS::Value is 8 bytes, so arrays of Values are indexed with scale TimesEight
// (equivalently, a shift of 3).
static_assert(sizeof(JS::Value) == 8,
              "required for TimesEight and 3 below to be correct");
static const Scale ValueScale = TimesEight;
static const size_t ValueShift = 3;
     78 
     79 static inline unsigned ScaleToShift(Scale scale) { return unsigned(scale); }
     80 
     81 static inline bool IsShiftInScaleRange(int i) {
     82  return i >= TimesOne && i <= TimesEight;
     83 }
     84 
     85 static inline Scale ShiftToScale(int i) {
     86  MOZ_ASSERT(IsShiftInScaleRange(i));
     87  return Scale(i);
     88 }
     89 
     90 static inline Scale ScaleFromElemWidth(int shift) {
     91  switch (shift) {
     92    case 1:
     93      return TimesOne;
     94    case 2:
     95      return TimesTwo;
     96    case 4:
     97      return TimesFour;
     98    case 8:
     99      return TimesEight;
    100  }
    101 
    102  MOZ_CRASH("Invalid scale");
    103 }
    104 
    105 static inline Scale ScaleFromScalarType(Scalar::Type type) {
    106  return ScaleFromElemWidth(Scalar::byteSize(type));
    107 }
    108 
    109 #ifdef JS_JITSPEW
    110 static inline const char* StringFromScale(Scale scale) {
    111  switch (scale) {
    112    case TimesOne:
    113      return "TimesOne";
    114    case TimesTwo:
    115      return "TimesTwo";
    116    case TimesFour:
    117      return "TimesFour";
    118    case TimesEight:
    119      return "TimesEight";
    120    default:
    121      break;
    122  }
    123  MOZ_CRASH("Unknown Scale");
    124 }
    125 #endif
    126 
    127 // Used for 32-bit immediates which do not require relocation.
    128 struct Imm32 {
    129  int32_t value;
    130 
    131  explicit Imm32(int32_t value) : value(value) {}
    132  explicit Imm32(FrameType type) : Imm32(int32_t(type)) {}
    133  explicit Imm32(ExceptionResumeKind kind) : Imm32(int32_t(kind)) {}
    134 
    135  static inline Imm32 ShiftOf(enum Scale s) {
    136    switch (s) {
    137      case TimesOne:
    138        return Imm32(0);
    139      case TimesTwo:
    140        return Imm32(1);
    141      case TimesFour:
    142        return Imm32(2);
    143      case TimesEight:
    144        return Imm32(3);
    145      default:
    146        MOZ_CRASH("Invalid scale");
    147    };
    148  }
    149 
    150  static inline Imm32 FactorOf(enum Scale s) {
    151    return Imm32(1 << ShiftOf(s).value);
    152  }
    153 };
    154 
    155 // Pointer-sized integer to be embedded as an immediate in an instruction.
    156 struct ImmWord {
    157  uintptr_t value;
    158 
    159  explicit ImmWord(uintptr_t value) : value(value) {}
    160 };
    161 
    162 // Used for 64-bit immediates which do not require relocation.
    163 struct Imm64 {
    164  uint64_t value;
    165 
    166  explicit Imm64(int64_t value) : value(value) {}
    167 
    168  Imm32 low() const { return Imm32(int32_t(value)); }
    169 
    170  Imm32 hi() const { return Imm32(int32_t(value >> 32)); }
    171 };
    172 
    173 #ifdef DEBUG
    174 static inline bool IsCompilingWasm() {
    175  return GetJitContext()->isCompilingWasm();
    176 }
    177 #endif
    178 
    179 // Pointer to be embedded as an immediate in an instruction.
    180 struct ImmPtr {
    181  void* value;
    182 
    183  struct NoCheckToken {};
    184 
    185  explicit constexpr ImmPtr(std::nullptr_t) : value(nullptr) {
    186    // Explicit constructor for nullptr. This ensures ImmPtr(0) can't be called.
    187    // Either use ImmPtr(nullptr) or ImmWord(0).
    188  }
    189 
    190  explicit ImmPtr(void* value, NoCheckToken) : value(value) {
    191    // A special unchecked variant for contexts where we know it is safe to
    192    // use an immptr. This is assuming the caller knows what they're doing.
    193  }
    194 
    195  explicit ImmPtr(const void* value) : value(const_cast<void*>(value)) {
    196    // To make code serialization-safe, wasm compilation should only
    197    // compile pointer immediates using a SymbolicAddress.
    198    MOZ_ASSERT(!IsCompilingWasm());
    199  }
    200 
    201  template <class R>
    202  explicit ImmPtr(R (*pf)()) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
    203    MOZ_ASSERT(!IsCompilingWasm());
    204  }
    205 
    206  template <class R, class A1>
    207  explicit ImmPtr(R (*pf)(A1)) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
    208    MOZ_ASSERT(!IsCompilingWasm());
    209  }
    210 
    211  template <class R, class A1, class A2>
    212  explicit ImmPtr(R (*pf)(A1, A2)) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
    213    MOZ_ASSERT(!IsCompilingWasm());
    214  }
    215 
    216  template <class R, class A1, class A2, class A3>
    217  explicit ImmPtr(R (*pf)(A1, A2, A3)) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
    218    MOZ_ASSERT(!IsCompilingWasm());
    219  }
    220 
    221  template <class R, class A1, class A2, class A3, class A4>
    222  explicit ImmPtr(R (*pf)(A1, A2, A3, A4))
    223      : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
    224    MOZ_ASSERT(!IsCompilingWasm());
    225  }
    226 };
    227 
    228 // The same as ImmPtr except that the intention is to patch this
    229 // instruction. The initial value of the immediate is 'addr' and this value is
    230 // either clobbered or used in the patching process.
    231 struct PatchedImmPtr {
    232  void* value;
    233 
    234  explicit PatchedImmPtr() : value(nullptr) {}
    235  explicit PatchedImmPtr(const void* value) : value(const_cast<void*>(value)) {}
    236 };
    237 
    238 class AssemblerShared;
    239 class ImmGCPtr;
    240 
    241 // Used for immediates which require relocation.
    242 class ImmGCPtr {
    243 public:
    244  const gc::Cell* value;
    245 
    246  explicit ImmGCPtr(const gc::Cell* ptr) : value(ptr) {
    247    // Nursery pointers can't be used if the main thread might be currently
    248    // performing a minor GC.
    249    MOZ_ASSERT_IF(ptr && !ptr->isTenured(),
    250                  !CurrentThreadIsOffThreadCompiling());
    251 
    252    // wasm shouldn't be creating GC things
    253    MOZ_ASSERT(!IsCompilingWasm());
    254  }
    255  explicit ImmGCPtr(const JSOffThreadAtom* atom) : ImmGCPtr(atom->raw()) {}
    256 
    257 private:
    258  ImmGCPtr() : value(0) {}
    259 };
    260 
    261 // Pointer to trampoline code. Trampoline code is kept alive until the runtime
    262 // is destroyed, so does not need to be traced.
    263 struct TrampolinePtr {
    264  uint8_t* value;
    265 
    266  TrampolinePtr() : value(nullptr) {}
    267  explicit TrampolinePtr(uint8_t* value) : value(value) { MOZ_ASSERT(value); }
    268 };
    269 
    270 // Pointer to be embedded as an immediate that is loaded/stored from by an
    271 // instruction.
    272 struct AbsoluteAddress {
    273  void* addr;
    274 
    275  explicit AbsoluteAddress(const void* addr) : addr(const_cast<void*>(addr)) {
    276    MOZ_ASSERT(!IsCompilingWasm());
    277  }
    278 
    279  AbsoluteAddress offset(ptrdiff_t delta) {
    280    return AbsoluteAddress(((uint8_t*)addr) + delta);
    281  }
    282 };
    283 
    284 // The same as AbsoluteAddress except that the intention is to patch this
    285 // instruction. The initial value of the immediate is 'addr' and this value is
    286 // either clobbered or used in the patching process.
    287 struct PatchedAbsoluteAddress {
    288  void* addr;
    289 
    290  explicit PatchedAbsoluteAddress() : addr(nullptr) {}
    291  explicit PatchedAbsoluteAddress(const void* addr)
    292      : addr(const_cast<void*>(addr)) {}
    293  explicit PatchedAbsoluteAddress(uintptr_t addr)
    294      : addr(reinterpret_cast<void*>(addr)) {}
    295 };
    296 
    297 // Specifies an address computed in the form of a register base and a constant,
    298 // 32-bit offset.
    299 struct Address {
    300  RegisterOrSP base;
    301  int32_t offset;
    302 
    303  Address(Register base, int32_t offset)
    304      : base(RegisterOrSP(base)), offset(offset) {}
    305 
    306 #ifdef JS_HAS_HIDDEN_SP
    307  Address(RegisterOrSP base, int32_t offset) : base(base), offset(offset) {}
    308 #endif
    309 
    310  Address() = delete;
    311 
    312  bool operator==(const Address& other) const {
    313    return base == other.base && offset == other.offset;
    314  }
    315 
    316  bool operator!=(const Address& other) const { return !(*this == other); }
    317 };
    318 
    319 #if JS_BITS_PER_WORD == 32
    320 
    321 static inline Address LowWord(const Address& address) {
    322  using mozilla::CheckedInt;
    323 
    324  CheckedInt<int32_t> offset =
    325      CheckedInt<int32_t>(address.offset) + INT64LOW_OFFSET;
    326  MOZ_ALWAYS_TRUE(offset.isValid());
    327  return Address(address.base, offset.value());
    328 }
    329 
    330 static inline Address HighWord(const Address& address) {
    331  using mozilla::CheckedInt;
    332 
    333  CheckedInt<int32_t> offset =
    334      CheckedInt<int32_t>(address.offset) + INT64HIGH_OFFSET;
    335  MOZ_ALWAYS_TRUE(offset.isValid());
    336  return Address(address.base, offset.value());
    337 }
    338 
    339 #endif
    340 
// Specifies an address computed in the form of a register base, a register
// index with a scale, and a constant, 32-bit offset.
struct BaseIndex {
  RegisterOrSP base;  // base register (or stack pointer)
  Register index;     // index register, scaled by `scale`
  Scale scale;        // log2 of the factor applied to `index`
  int32_t offset;     // constant byte displacement (not scaled)

  BaseIndex(Register base, Register index, Scale scale, int32_t offset = 0)
      : base(RegisterOrSP(base)), index(index), scale(scale), offset(offset) {}

#ifdef JS_HAS_HIDDEN_SP
  // Overload for architectures where SP is not an ordinary Register.
  BaseIndex(RegisterOrSP base, Register index, Scale scale, int32_t offset = 0)
      : base(base), index(index), scale(scale), offset(offset) {}
#endif

  // A BaseIndex is meaningless without its fields.
  BaseIndex() = delete;
};
    359 
    360 #if JS_BITS_PER_WORD == 32
    361 
    362 static inline BaseIndex LowWord(const BaseIndex& address) {
    363  using mozilla::CheckedInt;
    364 
    365  CheckedInt<int32_t> offset =
    366      CheckedInt<int32_t>(address.offset) + INT64LOW_OFFSET;
    367  MOZ_ALWAYS_TRUE(offset.isValid());
    368  return BaseIndex(address.base, address.index, address.scale, offset.value());
    369 }
    370 
    371 static inline BaseIndex HighWord(const BaseIndex& address) {
    372  using mozilla::CheckedInt;
    373 
    374  CheckedInt<int32_t> offset =
    375      CheckedInt<int32_t>(address.offset) + INT64HIGH_OFFSET;
    376  MOZ_ALWAYS_TRUE(offset.isValid());
    377  return BaseIndex(address.base, address.index, address.scale, offset.value());
    378 }
    379 
    380 #endif
    381 
// A BaseIndex used to access Values.  Note that |offset| is *not* scaled by
// sizeof(Value).  Use this *only* if you're indexing into a series of Values
// that aren't object elements or object slots (for example, values on the
// stack, values in an arguments object, &c.).  If you're indexing into an
// object's elements or slots, don't use this directly!  Use
// BaseObject{Element,Slot}Index instead.
struct BaseValueIndex : BaseIndex {
  // Fixes the scale to ValueScale (TimesEight; sizeof(Value) == 8).
  BaseValueIndex(Register base, Register index, int32_t offset = 0)
      : BaseIndex(RegisterOrSP(base), index, ValueScale, offset) {}

#ifdef JS_HAS_HIDDEN_SP
  // Overload for architectures where SP is not an ordinary Register.
  BaseValueIndex(RegisterOrSP base, Register index, int32_t offset = 0)
      : BaseIndex(base, index, ValueScale, offset) {}
#endif
};
    397 
// Specifies the address of an indexed Value within object elements from a
// base.  The index must not already be scaled by sizeof(Value)!
struct BaseObjectElementIndex : BaseValueIndex {
  BaseObjectElementIndex(Register base, Register index, int32_t offset = 0)
      : BaseValueIndex(base, index, offset) {}

#ifdef JS_HAS_HIDDEN_SP
  // Overload for architectures where SP is not an ordinary Register.
  BaseObjectElementIndex(RegisterOrSP base, Register index, int32_t offset = 0)
      : BaseValueIndex(base, index, offset) {}
#endif

  // Declaration only; layout checks are defined out of line.
  static void staticAssertions();
};
    411 
// Like BaseObjectElementIndex, except for object slots.
struct BaseObjectSlotIndex : BaseValueIndex {
  BaseObjectSlotIndex(Register base, Register index)
      : BaseValueIndex(base, index) {}

#ifdef JS_HAS_HIDDEN_SP
  // Overload for architectures where SP is not an ordinary Register.
  BaseObjectSlotIndex(RegisterOrSP base, Register index)
      : BaseValueIndex(base, index) {}
#endif

  // Declaration only; layout checks are defined out of line.
  static void staticAssertions();
};
    424 
// How a pointer embedded in jitted code must be handled when the containing
// code buffer moves or is traced.
enum class RelocationKind {
  // The target is immovable, so patching is only needed if the source
  // buffer is relocated and the reference is relative.
  HARDCODED,

  // The target is the start of a JitCode buffer, which must be traced
  // during garbage collection. Relocations and patching may be needed.
  JITCODE
};
    434 
    435 class CodeOffset {
    436  size_t offset_;
    437 
    438  static const size_t NOT_BOUND = size_t(-1);
    439 
    440 public:
    441  explicit CodeOffset(size_t offset) : offset_(offset) {}
    442  CodeOffset() : offset_(NOT_BOUND) {}
    443 
    444  size_t offset() const {
    445    MOZ_ASSERT(bound());
    446    return offset_;
    447  }
    448 
    449  void bind(size_t offset) {
    450    MOZ_ASSERT(!bound());
    451    offset_ = offset;
    452    MOZ_ASSERT(bound());
    453  }
    454  bool bound() const { return offset_ != NOT_BOUND; }
    455 
    456  void offsetBy(size_t delta) {
    457    MOZ_ASSERT(bound());
    458    MOZ_ASSERT(offset_ + delta >= offset_, "no overflow");
    459    offset_ += delta;
    460  }
    461 };
    462 
// A code label contains an absolute reference to a point in the code. Thus, it
// cannot be patched until after linking.
// When the source label is resolved into a memory address, this address is
// patched into the destination address.
// Some need to distinguish between multiple ways of patching that address.
// See JS_CODELABEL_LINKMODE.
class CodeLabel {
  // The destination position, where the absolute reference should get
  // patched into.
  CodeOffset patchAt_;

  // The source label (relative) in the code to where the destination should
  // get patched to.
  CodeOffset target_;

#ifdef JS_CODELABEL_LINKMODE
 public:
  enum LinkMode { Uninitialized = 0, RawPointer, MoveImmediate, JumpImmediate };

 private:
  // How Bind() should patch this label; see JS_CODELABEL_LINKMODE.
  LinkMode linkMode_ = Uninitialized;
#endif

 public:
  CodeLabel() = default;
  // Label with a known patch position but a target still to be resolved.
  explicit CodeLabel(const CodeOffset& patchAt) : patchAt_(patchAt) {}
  CodeLabel(const CodeOffset& patchAt, const CodeOffset& target)
      : patchAt_(patchAt), target_(target) {}
  // Mutable accessors, used while binding/patching.
  CodeOffset* patchAt() { return &patchAt_; }
  CodeOffset* target() { return &target_; }
  // Read-only accessors.
  CodeOffset patchAt() const { return patchAt_; }
  CodeOffset target() const { return target_; }
#ifdef JS_CODELABEL_LINKMODE
  LinkMode linkMode() const { return linkMode_; }
  void setLinkMode(LinkMode value) { linkMode_ = value; }
#endif
};
    500 
    501 using CodeLabelVector = Vector<CodeLabel, 0, SystemAllocPolicy>;
    502 
    503 class CodeLocationLabel {
    504  uint8_t* raw_ = nullptr;
    505 
    506 public:
    507  CodeLocationLabel(JitCode* code, CodeOffset base) {
    508    MOZ_ASSERT(base.offset() < code->instructionsSize());
    509    raw_ = code->raw() + base.offset();
    510  }
    511  explicit CodeLocationLabel(JitCode* code) { raw_ = code->raw(); }
    512  explicit CodeLocationLabel(uint8_t* raw) {
    513    MOZ_ASSERT(raw);
    514    raw_ = raw;
    515  }
    516 
    517  ptrdiff_t operator-(const CodeLocationLabel& other) const {
    518    return raw_ - other.raw_;
    519  }
    520 
    521  uint8_t* raw() const { return raw_; }
    522 };
    523 
    524 }  // namespace jit
    525 
    526 namespace wasm {
    527 
    528 // Represents an instruction to be patched and the intended pointee. These
    529 // links are accumulated in the MacroAssembler, but patching is done outside
    530 // the MacroAssembler (in Module::staticallyLink).
    531 
struct SymbolicAccess {
  SymbolicAccess(jit::CodeOffset patchAt, SymbolicAddress target)
      : patchAt(patchAt), target(target) {}

  // Offset of the instruction to patch.
  jit::CodeOffset patchAt;
  // The symbolic address the patched instruction should reference.
  SymbolicAddress target;
};
    539 
    540 using SymbolicAccessVector = Vector<SymbolicAccess, 0, SystemAllocPolicy>;
    541 
    542 // Describes a single wasm or asm.js memory access for the purpose of generating
    543 // code and metadata.
    544 
class MemoryAccessDesc {
  // Which wasm memory is accessed (asserted valid in memoryIndex()).
  uint32_t memoryIndex_;
  // Constant byte offset; 64-bit because of memory64 (see offset32() comment).
  uint64_t offset_;
  // Access alignment in bytes; asserted to be a power of two.
  uint32_t align_;
  // Scalar type loaded/stored.
  Scalar::Type type_;
  // Synchronization required for atomic accesses; None for plain accesses.
  jit::Synchronization sync_;
  // Trap-site bookkeeping for this access.
  wasm::TrapSiteDesc trapDesc_;
  // SIMD widening op; only meaningful when loadOp_ == Widen.
  wasm::SimdOp widenOp_;
  // Load flavor; starts Plain and may be upgraded once via the setters below.
  enum { Plain, ZeroExtend, Splat, Widen } loadOp_;
  // Used for an assertion in MacroAssembler about offset length
  mozilla::DebugOnly<bool> hugeMemory_;

 public:
  explicit MemoryAccessDesc(
      uint32_t memoryIndex, Scalar::Type type, uint32_t align, uint64_t offset,
      wasm::TrapSiteDesc trapDesc, mozilla::DebugOnly<bool> hugeMemory,
      jit::Synchronization sync = jit::Synchronization::None())
      : memoryIndex_(memoryIndex),
        offset_(offset),
        align_(align),
        type_(type),
        sync_(sync),
        trapDesc_(trapDesc),
        widenOp_(wasm::SimdOp::Limit),
        loadOp_(Plain),
        hugeMemory_(hugeMemory) {
    MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
  }

  uint32_t memoryIndex() const {
    MOZ_ASSERT(memoryIndex_ != UINT32_MAX);
    return memoryIndex_;
  }

  // The offset is a 64-bit value because of memory64. Almost always, it will
  // fit in 32 bits, and therefore offset32() is used almost everywhere in the
  // engine. The compiler front-ends must use offset64() to bypass the check
  // performed by offset32(), and must resolve offsets that don't fit in 32 bits
  // early in the compilation pipeline so that no large offsets are observed
  // later.
  uint32_t offset32() const {
    MOZ_ASSERT(offset_ <= UINT32_MAX);
    return uint32_t(offset_);
  }
  uint64_t offset64() const { return offset_; }

  // The offset can be cleared without worrying about its magnitude.
  void clearOffset() { offset_ = 0; }

  // The offset can be set (after compile-time evaluation) but only to values
  // that fit in 32 bits.
  void setOffset32(uint32_t offset) { offset_ = offset; }

  uint32_t align() const { return align_; }
  Scalar::Type type() const { return type_; }
  unsigned byteSize() const { return Scalar::byteSize(type()); }
  jit::Synchronization sync() const { return sync_; }
  const TrapSiteDesc& trapDesc() const { return trapDesc_; }
  // Only valid when this is a widening SIMD load.
  wasm::SimdOp widenSimdOp() const {
    MOZ_ASSERT(isWidenSimd128Load());
    return widenOp_;
  }
  bool isAtomic() const { return !sync_.isNone(); }
  bool isZeroExtendSimd128Load() const { return loadOp_ == ZeroExtend; }
  bool isSplatSimd128Load() const { return loadOp_ == Splat; }
  bool isWidenSimd128Load() const { return loadOp_ == Widen; }

  mozilla::DebugOnly<bool> isHugeMemory() const { return hugeMemory_; }
#ifdef DEBUG
  void assertOffsetInGuardPages() const;
#else
  void assertOffsetInGuardPages() const {}
#endif

  // The three setters below upgrade a Plain, non-atomic load exactly once;
  // each asserts the permitted scalar types for its load flavor.

  void setZeroExtendSimd128Load() {
    MOZ_ASSERT(type() == Scalar::Float32 || type() == Scalar::Float64);
    MOZ_ASSERT(!isAtomic());
    MOZ_ASSERT(loadOp_ == Plain);
    loadOp_ = ZeroExtend;
  }

  void setSplatSimd128Load() {
    MOZ_ASSERT(type() == Scalar::Uint8 || type() == Scalar::Uint16 ||
               type() == Scalar::Float32 || type() == Scalar::Float64);
    MOZ_ASSERT(!isAtomic());
    MOZ_ASSERT(loadOp_ == Plain);
    loadOp_ = Splat;
  }

  void setWidenSimd128Load(wasm::SimdOp op) {
    MOZ_ASSERT(type() == Scalar::Float64);
    MOZ_ASSERT(!isAtomic());
    MOZ_ASSERT(loadOp_ == Plain);
    widenOp_ = op;
    loadOp_ = Widen;
  }
};
    642 
    643 }  // namespace wasm
    644 
    645 namespace jit {
    646 
    647 // The base class of all Assemblers for all archs.
    648 class AssemblerShared {
    649  wasm::InliningContext inliningContext_;
    650  wasm::CallSites callSites_;
    651  wasm::CallSiteTargetVector callSiteTargets_;
    652  wasm::TrapSites trapSites_;
    653  wasm::SymbolicAccessVector symbolicAccesses_;
    654  wasm::TryNoteVector tryNotes_;
    655  wasm::CodeRangeUnwindInfoVector codeRangesUnwind_;
    656  wasm::CallRefMetricsPatchVector callRefMetricsPatches_;
    657  wasm::AllocSitePatchVector allocSitesPatches_;
    658 
    659 #ifdef DEBUG
    660  // To facilitate figuring out which part of SM created each instruction as
    661  // shown by IONFLAGS=codegen, this maintains a stack of (notionally)
    662  // code-creating routines, which is printed in the log output every time an
    663  // entry is pushed or popped.  Do not push/pop entries directly; instead use
    664  // `class AutoCreatedBy`.
    665  mozilla::Vector<const char*> creators_;
    666 #endif
    667 
    668 protected:
    669  CodeLabelVector codeLabels_;
    670 
    671  bool enoughMemory_;
    672  bool embedsNurseryPointers_;
    673 
    674 public:
    675  AssemblerShared() : enoughMemory_(true), embedsNurseryPointers_(false) {}
    676 
    677  ~AssemblerShared();
    678 
    679 #ifdef DEBUG
    680  // Do not use these directly; instead use `class AutoCreatedBy`.
    681  void pushCreator(const char*);
    682  void popCreator();
    683  // See comment on the implementation of `hasCreator` for guidance on what to
    684  // do if you get failures of the assertion `MOZ_ASSERT(hasCreator())`,
    685  bool hasCreator() const;
    686 #endif
    687 
    688  void propagateOOM(bool success) { enoughMemory_ &= success; }
    689 
    690  void setOOM() { enoughMemory_ = false; }
    691 
    692  bool oom() const { return !enoughMemory_; }
    693 
    694  bool embedsNurseryPointers() const { return embedsNurseryPointers_; }
    695 
    696  void addCodeLabel(CodeLabel label) {
    697    propagateOOM(codeLabels_.append(label));
    698  }
    699  size_t numCodeLabels() const { return codeLabels_.length(); }
    700  CodeLabel codeLabel(size_t i) { return codeLabels_[i]; }
    701  CodeLabelVector& codeLabels() { return codeLabels_; }
    702 
    703  // WebAssembly metadata emitted by masm operations accumulated on the
    704  // MacroAssembler, and swapped into a wasm::CompiledCode after finish().
    705 
    706  template <typename... Args>
    707  void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr,
    708              Args&&... args) {
    709    enoughMemory_ &= callSites_.append(desc, retAddr.offset());
    710    enoughMemory_ &= callSiteTargets_.emplaceBack(std::forward<Args>(args)...);
    711  }
    712  void append(wasm::Trap trap, wasm::TrapMachineInsn insn, uint32_t pcOffset,
    713              const wasm::TrapSiteDesc& desc) {
    714    enoughMemory_ &= trapSites_.append(trap, insn, pcOffset, desc);
    715  }
    716  void append(const wasm::MemoryAccessDesc& access, wasm::TrapMachineInsn insn,
    717              FaultingCodeOffset pcOffset) {
    718    append(wasm::Trap::OutOfBounds, insn, pcOffset.get(), access.trapDesc());
    719  }
    720  void append(wasm::SymbolicAccess access) {
    721    enoughMemory_ &= symbolicAccesses_.append(access);
    722  }
    723  // This one returns an index as the try note so that it can be looked up
    724  // later to add the end point and stack position of the try block.
    725  [[nodiscard]] bool append(wasm::TryNote tryNote, size_t* tryNoteIndex) {
    726    if (!tryNotes_.append(tryNote)) {
    727      enoughMemory_ = false;
    728      return false;
    729    }
    730    *tryNoteIndex = tryNotes_.length() - 1;
    731    return true;
    732  }
    733 
    734  void append(wasm::CodeRangeUnwindInfo::UnwindHow unwindHow,
    735              uint32_t pcOffset) {
    736    enoughMemory_ &= codeRangesUnwind_.emplaceBack(pcOffset, unwindHow);
    737  }
    738  void append(wasm::CallRefMetricsPatch patch) {
    739    enoughMemory_ &= callRefMetricsPatches_.append(patch);
    740  }
    741  void append(wasm::AllocSitePatch patch) {
    742    enoughMemory_ &= allocSitesPatches_.append(patch);
    743  }
    744 
    745  wasm::InliningContext& inliningContext() { return inliningContext_; }
    746  wasm::CallSites& callSites() { return callSites_; }
    747  wasm::CallSiteTargetVector& callSiteTargets() { return callSiteTargets_; }
    748  wasm::TrapSites& trapSites() { return trapSites_; }
    749  wasm::SymbolicAccessVector& symbolicAccesses() { return symbolicAccesses_; }
    750  wasm::TryNoteVector& tryNotes() { return tryNotes_; }
    751  wasm::CodeRangeUnwindInfoVector& codeRangeUnwindInfos() {
    752    return codeRangesUnwind_;
    753  }
    754  wasm::CallRefMetricsPatchVector& callRefMetricsPatches() {
    755    return callRefMetricsPatches_;
    756  }
    757  wasm::AllocSitePatchVector& allocSitesPatches() { return allocSitesPatches_; }
    758 };
    759 
    760 // AutoCreatedBy pushes and later pops a who-created-these-insns? tag into the
    761 // JitSpew_Codegen output.  These could be created fairly frequently, so a
    762 // dummy inlineable-out version is provided for non-debug builds.  The tag
    763 // text can be completely arbitrary -- it serves only to help readers of the
    764 // output text to relate instructions back to the part(s) of SM that created
    765 // them.
#ifdef DEBUG
class MOZ_RAII AutoCreatedBy {
 private:
  AssemblerShared& ash_;

 public:
  // Pushes `who` onto the assembler's creator stack for the lifetime of this
  // object; popped again on destruction.
  AutoCreatedBy(AssemblerShared& ash, const char* who) : ash_(ash) {
    ash_.pushCreator(who);
  }
  ~AutoCreatedBy() { ash_.popCreator(); }
};
#else
// Non-debug builds: a no-op with the same shape.
class MOZ_RAII AutoCreatedBy {
 public:
  inline AutoCreatedBy(AssemblerShared& ash, const char* who) {}
  // A user-defined constructor is necessary to stop some compilers from
  // complaining about unused variables.
  inline ~AutoCreatedBy() {}
};
#endif
    786 
// Base class for architecture specific ABIArgGenerator classes.
class ABIArgGeneratorShared {
 protected:
  // Which ABI is being generated for (ABIKind semantics defined elsewhere).
  ABIKind kind_;
  // Stack argument bytes consumed so far; see stackBytesConsumedSoFar().
  uint32_t stackOffset_;

  // Declaration only; defined out of line (per-architecture).
  explicit ABIArgGeneratorShared(ABIKind kind);

 public:
  ABIKind abi() const { return kind_; }
  uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
};
    799 
    800 // [SMDOC] ABI special registers
    801 //
    802 // There are a number of special registers that can be used for different
    803 // purposes during "ABI calls". These are defined per-architecture in their
    804 // Assembler-XYZ.h header. The documentation for them is centralized here to
    805 // keep them all in-sync.
    806 //
// The WebAssembly and System ABIs are similar but distinct, and so some of
    808 // these can be used in both contexts and others only in one. This is
    809 // unfortunate and should be formalized better. See "The WASM ABIs" in
    810 // WasmFrame.h for documentation on the Wasm ABI.
    811 //
    812 // The relevant similarities/differences for ABI registers are that:
    813 //   1. Wasm functions have special InstanceReg/HeapReg registers.
    814 //   2. Wasm functions do not have non-volatile registers.
    815 //   3. Wasm and System ABI have the same integer argument and return registers.
    816 //   4. Wasm and System ABI may have different FP argument and return registers.
    817 //      (notably ARM32 softfp and x87 FP are different).
    818 //
    819 // TODO: understand and describe the relationship with the various
    820 // MacroAssembler scratch registers. It looks like all of these must be
    821 // distinct from the MacroAssembler scratch registers.
    822 //
    823 // # InstanceReg
    824 //
    825 // Instance pointer argument register for WebAssembly functions in the
    826 // WebAssembly ABI.
    827 //
    828 // This must not alias any other register used for passing function arguments
    829 // or return values. Preserved by WebAssembly functions.
    830 //
    831 // The register must be non-volatile in the system ABI, as some code relies on
    832 // this to avoid reloading the register.
    833 //
    834 // See "The WASM ABIs" in WasmFrame.h for more information.
    835 //
    836 // # HeapReg
    837 //
    838 // Pointer to the base of (memory 0) for WebAssembly functions in the
    839 // WebAssembly ABI.
    840 //
    841 // This must not alias any other register used for passing function arguments
    842 // or return values. Preserved by WebAssembly functions.
    843 //
    844 // The register must be non-volatile in the system ABI, as some code relies on
    845 // this to avoid reloading the register.
    846 //
    847 // This register is not available on all architectures. It is notably absent
    848 // from x86.
    849 //
    850 // See "The WASM ABIs" in WasmFrame.h for more information.
    851 //
    852 // # ABINonArgReg (4 available)
    853 //
    854 // A register that can be clobbered in the prologue of a function.
    855 //
    856 // They are each distinct and have the following guarantees:
    857 //   - Will not be a System/Wasm ABI argument register.
    858 //   - Will not be the InstanceReg or HeapReg.
    859 //   - Could be a System/Wasm ABI result register.
    860 //   - Could be a System ABI non-volatile register.
    861 //
    862 // # ABINonArgDoubleReg (1 available)
    863 //
    864 // A floating-point register that can be clobbered in the prologue of a
    865 // function. May be volatile or non-volatile.
    866 //
    867 // # ABINonArgReturnReg (2 available)
    868 //
    869 // A register that can be clobbered in the prologue or epilogue of a function.
    870 //
    871 // They are each distinct and have the following guarantees:
    872 //   - All the guarantees of ABINonArgReg.
    873 //   - Will not be a System/Wasm ABI return register.
    874 //   - Will be distinct from ABINonVolatileReg (see below).
    875 //
    876 // There are only two of these, and the constraint is x86.
    877 //
    878 // # ABINonVolatileReg (1 available)
    879 //
    880 // A register that is:
    881 //   - Non-volatile in the System ABI
    882 //   - (implied by above) Not an argument or return register.
    883 //   - Distinct from the ABINonArgReturnReg.
    884 //
    885 // # ABINonArgReturnVolatileReg (1 available)
    886 //
    887 // A register that can be clobbered in the prologue or epilogue of a system ABI
    888 // function.
    889 //
    890 // They are each distinct and have the following guarantees:
    891 //   - All the guarantees of ABINonArgReturnReg.
    892 //   - Will be a volatile register in the System ABI.
    893 //
    894 
    895 }  // namespace jit
    896 }  // namespace js
    897 
    898 #endif /* jit_shared_Assembler_shared_h */