tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

LIR.h (72871B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef jit_LIR_h
      8 #define jit_LIR_h
      9 
     10 // This file declares the core data structures for LIR: storage allocations for
     11 // inputs and outputs, as well as the interface instructions must conform to.
     12 
     13 #include "mozilla/Array.h"
     14 #include "mozilla/Attributes.h"
     15 #include "mozilla/Casting.h"
     16 
     17 #include "jit/Bailouts.h"
     18 #include "jit/FixedList.h"
     19 #include "jit/InlineList.h"
     20 #include "jit/JitAllocPolicy.h"
     21 #include "jit/LIROpsGenerated.h"
     22 #include "jit/MIR-wasm.h"
     23 #include "jit/MIR.h"
     24 #include "jit/MIRGraph.h"
     25 #include "jit/Registers.h"
     26 #include "jit/Safepoints.h"
     27 #include "util/Memory.h"
     28 
     29 namespace js {
     30 namespace jit {
     31 
     32 class LUse;
     33 class LGeneralReg;
     34 class LFloatReg;
     35 class LStackSlot;
     36 class LStackArea;
     37 class LArgument;
     38 class LConstantIndex;
     39 class LInstruction;
     40 class LNode;
     41 class LDefinition;
     42 class MBasicBlock;
     43 class MIRGenerator;
     44 
// Step between successively allocated virtual register ids — TODO(review):
// confirm at the allocation sites in MIRGenerator.
static const uint32_t VREG_INCREMENT = 1;

// Argument slot reserved for the current frame's |this| value (per the name;
// confirm against callers).
static const uint32_t THIS_FRAME_ARGSLOT = 0;

#if defined(JS_NUNBOX32)
// On 32-bit platforms a JS Value is split ("nunboxed") into two machine
// words: a type tag and a payload.
static const uint32_t BOX_PIECES = 2;
// Virtual-register offsets of the type and data halves of a box.
static const uint32_t VREG_TYPE_OFFSET = 0;
static const uint32_t VREG_DATA_OFFSET = 1;
// Operand indices of the two halves of a boxed Value.
static const uint32_t TYPE_INDEX = 0;
static const uint32_t PAYLOAD_INDEX = 1;
// Operand indices of the low and high words of an int64 pair.
static const uint32_t INT64LOW_INDEX = 0;
static const uint32_t INT64HIGH_INDEX = 1;
#elif defined(JS_PUNBOX64)
// On 64-bit platforms a Value fits in one ("punboxed") word.
static const uint32_t BOX_PIECES = 1;
#else
#  error "Unknown!"
#endif

// Number of machine words needed to hold an int64: 2 on 32-bit targets,
// 1 on 64-bit targets.
static const uint32_t INT64_PIECES = sizeof(int64_t) / sizeof(uintptr_t);
     64 
// Represents storage for an operand. For constants, the pointer is tagged
// with a single bit, and the untagged pointer is a pointer to a Value.
class LAllocation {
  // Tagged word: the low KIND_BITS hold the Kind; the remaining bits hold
  // either a small payload (shifted by DATA_SHIFT), an aligned MConstant
  // pointer (CONSTANT_VALUE), or an aligned LInstruction pointer
  // (STACK_AREA).
  uintptr_t bits_;

  // 3 bits gives us enough for an interesting set of Kinds and also fits
  // within the alignment bits of pointers to Value, which are always
  // 8-byte aligned.
  static const uintptr_t KIND_BITS = 3;
  static const uintptr_t KIND_SHIFT = 0;
  static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1;

 protected:
  // Width of the payload field. On 64-bit a full 32 bits fit above the tag;
  // on 32-bit the tag bits must be carved out of the 32-bit word.
#ifdef JS_64BIT
  static const uintptr_t DATA_BITS = sizeof(uint32_t) * 8;
#else
  static const uintptr_t DATA_BITS = (sizeof(uint32_t) * 8) - KIND_BITS;
#endif
  static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS;

 public:
  enum Kind {
    CONSTANT_VALUE,  // MConstant*.
    CONSTANT_INDEX,  // Constant arbitrary index.
    USE,         // Use of a virtual register, with physical allocation policy.
    GPR,         // General purpose register.
    FPU,         // Floating-point register.
    STACK_SLOT,  // Stack slot.
    STACK_AREA,  // Stack area.
    ARGUMENT_SLOT  // Argument slot.
  };

  static const uintptr_t DATA_MASK = (uintptr_t(1) << DATA_BITS) - 1;

 protected:
  // Extracts the untyped payload; only valid for non-pointer kinds.
  uint32_t data() const {
    MOZ_ASSERT(!hasIns());
    return mozilla::AssertedCast<uint32_t>(bits_ >> DATA_SHIFT);
  }
  // Overwrites the payload field while preserving the kind tag.
  void setData(uintptr_t data) {
    MOZ_ASSERT(!hasIns());
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ &= ~(DATA_MASK << DATA_SHIFT);
    bits_ |= (data << DATA_SHIFT);
  }
  // Reinitializes both the kind tag and the payload in one store.
  void setKindAndData(Kind kind, uintptr_t data) {
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ = (uintptr_t(kind) << KIND_SHIFT) | data << DATA_SHIFT;
    MOZ_ASSERT(!hasIns());
  }

  // STACK_AREA allocations store an LInstruction* instead of a data payload.
  bool hasIns() const { return isStackArea(); }
  const LInstruction* ins() const {
    MOZ_ASSERT(hasIns());
    return reinterpret_cast<const LInstruction*>(bits_ &
                                                 ~(KIND_MASK << KIND_SHIFT));
  }
  LInstruction* ins() {
    MOZ_ASSERT(hasIns());
    return reinterpret_cast<LInstruction*>(bits_ & ~(KIND_MASK << KIND_SHIFT));
  }
  // Stores a kind-tagged instruction pointer. The pointer's low bits must be
  // clear so the tag does not clobber address bits (asserted below).
  void setKindAndIns(Kind kind, LInstruction* ins) {
    uintptr_t data = reinterpret_cast<uintptr_t>(ins);
    MOZ_ASSERT((data & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ = data | (uintptr_t(kind) << KIND_SHIFT);
    MOZ_ASSERT(hasIns());
  }

  LAllocation(Kind kind, uintptr_t data) { setKindAndData(kind, data); }
  LAllocation(Kind kind, LInstruction* ins) { setKindAndIns(kind, ins); }
  explicit LAllocation(Kind kind) { setKindAndData(kind, 0); }

 public:
  // Default-constructs the "bogus" (empty) allocation, encoded as all zero
  // bits (which is also CONSTANT_VALUE with a null pointer — hence isBogus()
  // checks the whole word).
  LAllocation() : bits_(0) { MOZ_ASSERT(isBogus()); }

  // The MConstant pointer must have its low bits cleared.
  explicit LAllocation(const MConstant* c) {
    MOZ_ASSERT(c);
    bits_ = uintptr_t(c);
    MOZ_ASSERT((bits_ & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ |= CONSTANT_VALUE << KIND_SHIFT;
  }
  inline explicit LAllocation(AnyRegister reg);

  Kind kind() const { return (Kind)((bits_ >> KIND_SHIFT) & KIND_MASK); }

  // Kind predicates.
  bool isBogus() const { return bits_ == 0; }
  bool isUse() const { return kind() == USE; }
  bool isConstant() const { return isConstantValue() || isConstantIndex(); }
  bool isConstantValue() const { return kind() == CONSTANT_VALUE; }
  bool isConstantIndex() const { return kind() == CONSTANT_INDEX; }
  bool isGeneralReg() const { return kind() == GPR; }
  bool isFloatReg() const { return kind() == FPU; }
  bool isStackSlot() const { return kind() == STACK_SLOT; }
  bool isStackArea() const { return kind() == STACK_AREA; }
  bool isArgument() const { return kind() == ARGUMENT_SLOT; }
  bool isAnyRegister() const { return isGeneralReg() || isFloatReg(); }
  bool isMemory() const { return isStackSlot() || isArgument(); }
  inline uint32_t memorySlot() const;
  // Checked downcasts to the concrete allocation subclasses (defined inline
  // elsewhere).
  inline LUse* toUse();
  inline const LUse* toUse() const;
  inline const LGeneralReg* toGeneralReg() const;
  inline const LFloatReg* toFloatReg() const;
  inline const LStackSlot* toStackSlot() const;
  inline LStackArea* toStackArea();
  inline const LStackArea* toStackArea() const;
  inline const LArgument* toArgument() const;
  inline const LConstantIndex* toConstantIndex() const;
  inline AnyRegister toAnyRegister() const;

  const MConstant* toConstant() const {
    MOZ_ASSERT(isConstantValue());
    return reinterpret_cast<const MConstant*>(bits_ &
                                              ~(KIND_MASK << KIND_SHIFT));
  }

  // Allocations compare (and hash) by their raw encoding.
  bool operator==(const LAllocation& other) const {
    return bits_ == other.bits_;
  }

  bool operator!=(const LAllocation& other) const {
    return bits_ != other.bits_;
  }

  // NOTE(review): on 64-bit, bits_ may be wider than HashNumber, so this
  // implicitly truncates — presumably fine for hashing; confirm intended.
  HashNumber hash() const { return bits_; }

  uintptr_t asRawBits() const { return bits_; }

  // Defined out of line; presumably true when this allocation's storage
  // overlaps |other|'s — verify against the definition.
  bool aliases(const LAllocation& other) const;

#ifdef JS_JITSPEW
  UniqueChars toString() const;
  void dump() const;
#endif
};
    200 
// Use of a virtual register, with a policy describing what storage the
// register allocator may pick. The base-class payload packs, from least
// significant bit: policy, fixed-register code, used-at-start flag, vreg.
class LUse : public LAllocation {
  static const uint32_t POLICY_BITS = 3;
  static const uint32_t POLICY_SHIFT = 0;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
  // ARM64 needs an extra bit for its larger register-code range.
#ifdef JS_CODEGEN_ARM64
  static const uint32_t REG_BITS = 7;
#else
  static const uint32_t REG_BITS = 6;
#endif
  static const uint32_t REG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t REG_MASK = (1 << REG_BITS) - 1;

  // Whether the physical register for this operand may be reused for a def.
  static const uint32_t USED_AT_START_BITS = 1;
  static const uint32_t USED_AT_START_SHIFT = REG_SHIFT + REG_BITS;
  static const uint32_t USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1;

  // The REG field will hold the register code for any Register or
  // FloatRegister, though not for an AnyRegister.
  static_assert(std::max(Registers::Total, FloatRegisters::Total) <=
                    REG_MASK + 1,
                "The field must be able to represent any register code");

 public:
  // Virtual registers get the remaining bits.
  static const uint32_t VREG_BITS =
      DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS);
  static const uint32_t VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

  enum Policy {
    // Input should be in a read-only register or stack slot.
    ANY,

    // Input must be in a read-only register.
    REGISTER,

    // Input must be in a specific, read-only register.
    FIXED,

    // Keep the used virtual register alive, and use whatever allocation is
    // available. This is similar to ANY but hints to the register allocator
    // that it is never useful to optimize this site.
    KEEPALIVE,

    // Input must be allocated on the stack.  Only used when extracting stack
    // results from stack result areas.
    STACK,

    // For snapshot inputs, indicates that the associated instruction will
    // write this input to its output register before bailing out.
    // The register allocator may thus allocate that output register, and
    // does not need to keep the virtual register alive (alternatively,
    // this may be treated as KEEPALIVE).
    RECOVERED_INPUT
  };

  // Packs policy, fixed-register code, and used-at-start into the payload.
  // The vreg field is left zero; callers use setVirtualRegister to fill it.
  void set(Policy policy, uint32_t reg, bool usedAtStart) {
    MOZ_ASSERT(reg <= REG_MASK, "Register code must fit in field");
    setKindAndData(USE, (policy << POLICY_SHIFT) | (reg << REG_SHIFT) |
                            ((usedAtStart ? 1 : 0) << USED_AT_START_SHIFT));
  }

 public:
  LUse(uint32_t vreg, Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
    setVirtualRegister(vreg);
  }
  explicit LUse(Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
  }
  // Fixed-register uses: the input must live in exactly |reg|.
  explicit LUse(Register reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  explicit LUse(FloatRegister reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  LUse(Register reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }
  LUse(FloatRegister reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }

  // Stores |index| into the vreg field, leaving the other fields intact.
  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);

    uint32_t old = data() & ~(VREG_MASK << VREG_SHIFT);
    setData(old | (index << VREG_SHIFT));
  }

  Policy policy() const {
    Policy policy = (Policy)((data() >> POLICY_SHIFT) & POLICY_MASK);
    return policy;
  }
  uint32_t virtualRegister() const {
    uint32_t index = (data() >> VREG_SHIFT) & VREG_MASK;
    // A use must have been assigned a nonzero vreg by this point.
    MOZ_ASSERT(index != 0);
    return index;
  }
  // Code of the required register; only meaningful for FIXED uses.
  uint32_t registerCode() const {
    MOZ_ASSERT(policy() == FIXED);
    return (data() >> REG_SHIFT) & REG_MASK;
  }
  bool isFixedRegister() const { return policy() == FIXED; }
  bool usedAtStart() const {
    return !!((data() >> USED_AT_START_SHIFT) & USED_AT_START_MASK);
  }
};
    312 
    313 static const uint32_t MAX_VIRTUAL_REGISTERS = LUse::VREG_MASK;
    314 
    315 class LBoxAllocation {
    316 #ifdef JS_NUNBOX32
    317  LAllocation type_;
    318  LAllocation payload_;
    319 #else
    320  LAllocation value_;
    321 #endif
    322 
    323 public:
    324 #ifdef JS_NUNBOX32
    325  LBoxAllocation(LAllocation type, LAllocation payload)
    326      : type_(type), payload_(payload) {}
    327 
    328  LAllocation type() const { return type_; }
    329  LAllocation payload() const { return payload_; }
    330 #else
    331  explicit LBoxAllocation(LAllocation value) : value_(value) {}
    332 
    333  LAllocation value() const { return value_; }
    334 #endif
    335 };
    336 
// Holder for a 64-bit value's allocation(s) or definition(s): on 32-bit
// targets an int64 occupies two pieces (high and low words); on 64-bit
// targets, a single one.
template <class ValT>
class LInt64Value {
#if JS_BITS_PER_WORD == 32
  ValT high_;
  ValT low_;
#else
  ValT value_;
#endif

 public:
  LInt64Value() = default;

#if JS_BITS_PER_WORD == 32
  LInt64Value(ValT high, ValT low) : high_(high), low_(low) {}

  ValT high() const { return high_; }
  ValT low() const { return low_; }

  // Stable pointers to the underlying pieces.
  const ValT* pointerHigh() const { return &high_; }
  const ValT* pointerLow() const { return &low_; }
#else
  explicit LInt64Value(ValT value) : value_(value) {}

  ValT value() const { return value_; }
  const ValT* pointer() const { return &value_; }
#endif
};

// An int64 operand allocation: one LAllocation per machine-word piece.
using LInt64Allocation = LInt64Value<LAllocation>;
    366 
// A fixed general-purpose register allocation; the payload holds the
// register code.
class LGeneralReg : public LAllocation {
 public:
  explicit LGeneralReg(Register reg) : LAllocation(GPR, reg.code()) {}

  Register reg() const { return Register::FromCode(data()); }
};
    373 
// A fixed floating-point register allocation; the payload holds the
// register code.
class LFloatReg : public LAllocation {
 public:
  explicit LFloatReg(FloatRegister reg) : LAllocation(FPU, reg.code()) {}

  FloatRegister reg() const { return FloatRegister::FromCode(data()); }
};
    380 
// Arbitrary constant index.
class LConstantIndex : public LAllocation {
  // The constructor is private; instances are created via FromIndex.
  explicit LConstantIndex(uint32_t index)
      : LAllocation(CONSTANT_INDEX, index) {}

 public:
  static LConstantIndex FromIndex(uint32_t index) {
    return LConstantIndex(index);
  }

  uint32_t index() const { return data(); }
};
    393 
// Stack slots are indices into the stack. The indices are byte indices.
class LStackSlot : public LAllocation {
  // Stack slots are aligned to 32-bit word boundaries.
  static constexpr uint32_t SLOT_ALIGNMENT = 4;

  // Stack slot width is stored in the two least significant bits.
  static constexpr uint32_t WIDTH_MASK = SLOT_ALIGNMENT - 1;

  // Remaining bits hold the stack slot offset.
  static constexpr uint32_t SLOT_MASK = ~WIDTH_MASK;

 public:
  // Slot width classes; see ByteWidth for the corresponding byte sizes.
  enum Width {
    Word,
    DoubleWord,
    QuadWord,
  };

  // Packs a byte offset and a Width into one word: the offset's low
  // alignment bits are free for the width tag because slots are
  // SLOT_ALIGNMENT-aligned.
  class SlotAndWidth {
    uint32_t data_;

    explicit SlotAndWidth(uint32_t data) : data_(data) {}

   public:
    static SlotAndWidth fromData(uint32_t data) { return SlotAndWidth(data); }

    explicit SlotAndWidth(uint32_t slot, Width width) {
      MOZ_ASSERT(slot % SLOT_ALIGNMENT == 0);
      MOZ_ASSERT(uint32_t(width) < SLOT_ALIGNMENT);
      data_ = slot | uint32_t(width);
    }
    uint32_t data() const { return data_; }
    uint32_t slot() const { return data_ & SLOT_MASK; }
    Width width() const { return Width(data_ & WIDTH_MASK); }
  };

  explicit LStackSlot(SlotAndWidth slotAndWidth)
      : LAllocation(STACK_SLOT, slotAndWidth.data()) {}

  LStackSlot(uint32_t slot, Width width)
      : LStackSlot(SlotAndWidth(slot, width)) {}

  uint32_t slot() const { return SlotAndWidth::fromData(data()).slot(); }
  Width width() const { return SlotAndWidth::fromData(data()).width(); }

  // |Type| is LDefinition::Type, but can't forward declare a nested definition.
  template <typename Type>
  static Width width(Type type);

  // Size in bytes of a slot of the given width.
  static uint32_t ByteWidth(Width width) {
    switch (width) {
      case Width::Word:
        return 4;
      case Width::DoubleWord:
        return 8;
      case Width::QuadWord:
        return 16;
    }
    MOZ_CRASH("invalid width");
  }
};
    455 
// Stack area indicates a contiguous stack allocation meant to receive call
// results that don't fit in registers.
class LStackArea : public LAllocation {
 public:
  // The area is identified by the instruction defining it; the pointer is
  // stored tagged in the base-class word (see LAllocation::setKindAndIns).
  explicit LStackArea(LInstruction* stackArea)
      : LAllocation(STACK_AREA, stackArea) {}

  // Byte index of base of stack area, in the same coordinate space as
  // LStackSlot::slot().
  inline uint32_t base() const;
  inline void setBase(uint32_t base);

  // Size in bytes of the stack area.
  inline uint32_t size() const;
  // Required alignment of the area, in bytes.
  inline uint32_t alignment() const { return 8; }

  // Iterates the individual results laid out in this stack area.
  // done/next/alloc/isWasmAnyRef are defined out of line.
  class ResultIterator {
    const LStackArea& alloc_;
    uint32_t idx_;

   public:
    explicit ResultIterator(const LStackArea& alloc) : alloc_(alloc), idx_(0) {}

    inline bool done() const;
    inline void next();
    inline LAllocation alloc() const;
    inline bool isWasmAnyRef() const;

    // True while there are still results to visit.
    explicit operator bool() const { return !done(); }
  };

  ResultIterator results() const { return ResultIterator(*this); }

  // Stack slot for |def|'s result within this area; defined out of line.
  inline LStackSlot resultAlloc(LInstruction* lir, LDefinition* def) const;
};
    491 
// Arguments are reverse indices into the stack. The indices are byte indices.
class LArgument : public LAllocation {
 public:
  explicit LArgument(uint32_t index) : LAllocation(ARGUMENT_SLOT, index) {}

  // Byte index of this argument slot.
  uint32_t index() const { return data(); }
};
    499 
    500 inline uint32_t LAllocation::memorySlot() const {
    501  MOZ_ASSERT(isMemory());
    502  return isStackSlot() ? toStackSlot()->slot() : toArgument()->index();
    503 }
    504 
// Represents storage for a definition.
class LDefinition {
  // Bits containing policy, type, and virtual register.
  // Layout, from least significant: type (4 bits), policy (2 bits), vreg.
  uint32_t bits_;

  // Before register allocation, this optionally contains a fixed policy.
  // Register allocation assigns this field to a physical policy if none is
  // fixed.
  //
  // Right now, pre-allocated outputs are limited to the following:
  //   * Physical argument stack slots.
  //   * Physical registers.
  LAllocation output_;

  static const uint32_t TYPE_BITS = 4;
  static const uint32_t TYPE_SHIFT = 0;
  static const uint32_t TYPE_MASK = (1 << TYPE_BITS) - 1;
  static const uint32_t POLICY_BITS = 2;
  static const uint32_t POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;

  // The virtual register gets all remaining bits of the 32-bit word.
  static const uint32_t VREG_BITS =
      (sizeof(uint32_t) * 8) - (POLICY_BITS + TYPE_BITS);
  static const uint32_t VREG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

 public:
  // Note that definitions, by default, are always allocated a register,
  // unless the policy specifies that an input can be re-used and that input
  // is a stack slot.
  enum Policy {
    // The policy is predetermined by the LAllocation attached to this
    // definition. The allocation may be:
    //   * A register, which may not appear as any fixed temporary.
    //   * A stack slot or argument.
    //
    // Register allocation will not modify a fixed allocation.
    FIXED,

    // A random register of an appropriate class will be assigned.
    REGISTER,

    // An area on the stack must be assigned.  Used when defining stack results
    // and stack result areas.
    STACK,

    // One definition per instruction must re-use the first input
    // allocation, which (for now) must be a register.
    MUST_REUSE_INPUT
  };

  enum Type {
    GENERAL,  // Generic, integer or pointer-width data (GPR).
    INT32,    // int32 data (GPR).
    OBJECT,   // Pointer that may be collected as garbage (GPR).
    SLOTS,    // Slots/elements pointer that may be moved by minor GCs (GPR).
    WASM_ANYREF,       // Tagged pointer that may be collected as garbage (GPR).
    WASM_STRUCT_DATA,  // Pointer to wasm struct OOL storage that may be moved
                       // by minor GCs (GPR).
    WASM_ARRAY_DATA,   // Pointer to wasm array IL or OOL storage; in the OOL
                       // case it may be moved by minor GCs (GPR).
    FLOAT32,           // 32-bit floating-point value (FPU).
    DOUBLE,            // 64-bit floating-point value (FPU).
    SIMD128,           // 128-bit SIMD vector (FPU).
    STACKRESULTS,  // A variable-size stack allocation that may contain objects.
#ifdef JS_NUNBOX32
    // A type virtual register must be followed by a payload virtual
    // register, as both will be tracked as a single gcthing.
    TYPE,
    PAYLOAD
#else
    BOX  // Joined box, for punbox systems. (GPR, gcthing)
#endif
  };

  // Initializes bits_ by packing the three fields.
  void set(uint32_t index, Type type, Policy policy) {
    static_assert(MAX_VIRTUAL_REGISTERS <= VREG_MASK);
    bits_ =
        (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT);
#ifndef ENABLE_WASM_SIMD
    MOZ_ASSERT(this->type() != SIMD128);
#endif
  }

 public:
  LDefinition(uint32_t index, Type type, Policy policy = REGISTER) {
    set(index, type, policy);
  }

  explicit LDefinition(Type type, Policy policy = REGISTER) {
    set(0, type, policy);
  }

  // Fixed-output definitions: the allocation |a| is predetermined.
  LDefinition(Type type, const LAllocation& a) : output_(a) {
    set(0, type, FIXED);
  }

  LDefinition(uint32_t index, Type type, const LAllocation& a) : output_(a) {
    set(index, type, FIXED);
  }

  // Default-constructs a "bogus" definition: FIXED policy with a bogus
  // (all-zero) output allocation.
  LDefinition() : bits_(0) { MOZ_ASSERT(isBogusTemp()); }

  static LDefinition BogusTemp() { return LDefinition(); }

  Policy policy() const {
    return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK);
  }
  Type type() const { return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); }

  // Whether |reg|'s width class matches the float type |type|.
  static bool isFloatRegCompatible(Type type, FloatRegister reg) {
    if (type == FLOAT32) {
      return reg.isSingle();
    }
    if (type == DOUBLE) {
      return reg.isDouble();
    }
    MOZ_ASSERT(type == SIMD128);
    return reg.isSimd128();
  }

  bool isCompatibleReg(const AnyRegister& r) const {
    if (isFloatReg() && r.isFloat()) {
      return isFloatRegCompatible(type(), r.fpu());
    }
    return !isFloatReg() && !r.isFloat();
  }
  bool isCompatibleDef(const LDefinition& other) const {
#if defined(JS_CODEGEN_ARM)
    // On ARM, float-reg defs must also agree on exact type — presumably
    // because single/double registers alias there; verify against the
    // backend.
    if (isFloatReg() && other.isFloatReg()) {
      return type() == other.type();
    }
    return !isFloatReg() && !other.isFloatReg();
#else
    return isFloatReg() == other.isFloatReg();
#endif
  }

  static bool isFloatReg(Type type) {
    return type == FLOAT32 || type == DOUBLE || type == SIMD128;
  }
  bool isFloatReg() const { return isFloatReg(type()); }

  uint32_t virtualRegister() const {
    uint32_t index = (bits_ >> VREG_SHIFT) & VREG_MASK;
    // NOTE: unlike LUse::virtualRegister, index 0 is allowed here — the
    // nonzero assert is deliberately left disabled.
    // MOZ_ASSERT(index != 0);
    return index;
  }
  LAllocation* output() { return &output_; }
  const LAllocation* output() const { return &output_; }
  bool isFixed() const { return policy() == FIXED; }
  bool isBogusTemp() const { return isFixed() && output()->isBogus(); }
  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);
    bits_ &= ~(VREG_MASK << VREG_SHIFT);
    bits_ |= index << VREG_SHIFT;
  }
  // Assigning a non-use output forces the policy to FIXED.
  void setOutput(const LAllocation& a) {
    output_ = a;
    if (!a.isUse()) {
      bits_ &= ~(POLICY_MASK << POLICY_SHIFT);
      bits_ |= FIXED << POLICY_SHIFT;
    }
  }
  // For MUST_REUSE_INPUT definitions: the reused operand index is encoded
  // in output_ as a constant index.
  void setReusedInput(uint32_t operand) {
    output_ = LConstantIndex::FromIndex(operand);
  }
  uint32_t getReusedInput() const {
    MOZ_ASSERT(policy() == LDefinition::MUST_REUSE_INPUT);
    return output_.toConstantIndex()->index();
  }

  // Returns true if this definition should be added to safepoints for GC
  // tracing. This includes Value type tags on 32-bit and slots/elements
  // pointers.
  inline bool isSafepointGCType(LNode* ins) const;

  // Maps a MIRType to the LDefinition type used to hold it.
  static inline Type TypeFrom(MIRType type) {
    switch (type) {
      case MIRType::Boolean:
      case MIRType::Int32:
        // The stack slot allocator doesn't currently support allocating
        // 1-byte slots, so for now we lower MIRType::Boolean into INT32.
        static_assert(sizeof(bool) <= sizeof(int32_t),
                      "bool doesn't fit in an int32 slot");
        return LDefinition::INT32;
      case MIRType::String:
      case MIRType::Symbol:
      case MIRType::BigInt:
      case MIRType::Object:
        return LDefinition::OBJECT;
      case MIRType::Double:
        return LDefinition::DOUBLE;
      case MIRType::Float32:
        return LDefinition::FLOAT32;
#if defined(JS_PUNBOX64)
      case MIRType::Value:
        return LDefinition::BOX;
#endif
      case MIRType::Slots:
      case MIRType::Elements:
        return LDefinition::SLOTS;
      case MIRType::WasmAnyRef:
        return LDefinition::WASM_ANYREF;
      case MIRType::WasmStructData:
        return LDefinition::WASM_STRUCT_DATA;
      case MIRType::WasmArrayData:
        return LDefinition::WASM_ARRAY_DATA;
      case MIRType::Pointer:
      case MIRType::IntPtr:
        return LDefinition::GENERAL;
#if defined(JS_PUNBOX64)
      case MIRType::Int64:
        return LDefinition::GENERAL;
#endif
      case MIRType::StackResults:
        return LDefinition::STACKRESULTS;
      case MIRType::Simd128:
        return LDefinition::SIMD128;
      default:
        MOZ_CRASH("unexpected type");
    }
  }

  UniqueChars toString() const;

#ifdef JS_JITSPEW
  void dump() const;
#endif
};
    735 
// An int64 definition: a high/low pair of LDefinitions on 32-bit targets,
// a single LDefinition on 64-bit targets.
class LInt64Definition : public LInt64Value<LDefinition> {
 public:
  using LInt64Value<LDefinition>::LInt64Value;

  // Default-constructed (bogus) placeholder temp.
  static LInt64Definition BogusTemp() { return LInt64Definition(); }

  bool isBogusTemp() const {
#if JS_BITS_PER_WORD == 32
    // Both halves must agree on bogus-ness.
    MOZ_ASSERT(high().isBogusTemp() == low().isBogusTemp());
    return high().isBogusTemp();
#else
    return value().isBogusTemp();
#endif
  }
};
    751 
// Maps an LDefinition type to the stack-slot width it occupies. Pointer-sized
// types are Word on 32-bit targets and DoubleWord on 64-bit targets; the
// case lists below are selected accordingly by the preprocessor.
template <>
inline LStackSlot::Width LStackSlot::width(LDefinition::Type type) {
  switch (type) {
#if JS_BITS_PER_WORD == 32
    case LDefinition::GENERAL:
    case LDefinition::OBJECT:
    case LDefinition::SLOTS:
    case LDefinition::WASM_ANYREF:
    case LDefinition::WASM_STRUCT_DATA:
    case LDefinition::WASM_ARRAY_DATA:
#endif
#ifdef JS_NUNBOX32
    case LDefinition::TYPE:
    case LDefinition::PAYLOAD:
#endif
    case LDefinition::INT32:
    case LDefinition::FLOAT32:
      return LStackSlot::Word;
#if JS_BITS_PER_WORD == 64
    case LDefinition::GENERAL:
    case LDefinition::OBJECT:
    case LDefinition::SLOTS:
    case LDefinition::WASM_ANYREF:
    case LDefinition::WASM_STRUCT_DATA:
    case LDefinition::WASM_ARRAY_DATA:
#endif
#ifdef JS_PUNBOX64
    case LDefinition::BOX:
#endif
    case LDefinition::DOUBLE:
      return LStackSlot::DoubleWord;
    case LDefinition::SIMD128:
      return LStackSlot::QuadWord;
    case LDefinition::STACKRESULTS:
      MOZ_CRASH("Stack results area must be allocated manually");
  }
  MOZ_CRASH("Unknown slot type");
}
    790 
// Forward declarations of LIR types.
#define LIROP(op) class L##op;
LIR_OPCODE_LIST(LIROP)
#undef LIROP

class LSnapshot;
class LSafepoint;
class LElementVisitor;

// Upper bound on operands per LInstruction; chosen to exactly fill the
// 6-bit nonPhiNumOperands_ bitfield in LNode (see the static_assert there).
constexpr size_t MaxNumLInstructionOperands = 63;
    801 
    802 // The common base class for LPhi and LInstruction.
class LNode {
protected:
 // The MIR node this LIR node was lowered from; null until setMir() is
 // called. Subclasses expose it with a typed mir() accessor.
 MDefinition* mir_;

private:
 // Owning basic block; set by LBlock::add / setBlock.
 LBlock* block_;
 // Instruction id assigned by the register allocator; 0 means "unassigned".
 uint32_t id_;

protected:
 // Bitfields below are all uint32_t to make sure MSVC packs them correctly.
 uint32_t op_ : 10;
 uint32_t isCall_ : 1;

 // LPhi::numOperands() may not fit in this bitfield, so we only use this
 // field for LInstruction.
 uint32_t nonPhiNumOperands_ : 6;
 static_assert((1 << 6) - 1 == MaxNumLInstructionOperands,
               "packing constraints");

 // For LInstruction, the first operand is stored at offset
 // sizeof(LInstruction) + nonPhiOperandsOffset_ * sizeof(uintptr_t).
 uint32_t nonPhiOperandsOffset_ : 5;
 uint32_t numDefs_ : 4;
 uint32_t numTemps_ : 4;

public:
 // One enumerator per LIR opcode (generated from LIROpsGenerated.h), plus a
 // trailing Invalid sentinel used for range checks.
 enum class Opcode {
#define LIROP(name) name,
   LIR_OPCODE_LIST(LIROP)
#undef LIROP
       Invalid
 };

 // The MOZ_ASSERTs below verify that each count survives truncation into its
 // bitfield; the constructor arguments are full-width uint32_t.
 LNode(Opcode op, uint32_t nonPhiNumOperands, uint32_t numDefs,
       uint32_t numTemps)
     : mir_(nullptr),
       block_(nullptr),
       id_(0),
       op_(uint32_t(op)),
       isCall_(false),
       nonPhiNumOperands_(nonPhiNumOperands),
       nonPhiOperandsOffset_(0),
       numDefs_(numDefs),
       numTemps_(numTemps) {
   MOZ_ASSERT(op < Opcode::Invalid);
   MOZ_ASSERT(op_ == uint32_t(op), "opcode must fit in bitfield");
   MOZ_ASSERT(nonPhiNumOperands_ == nonPhiNumOperands,
              "nonPhiNumOperands must fit in bitfield");
   MOZ_ASSERT(numDefs_ == numDefs, "numDefs must fit in bitfield");
   MOZ_ASSERT(numTemps_ == numTemps, "numTemps must fit in bitfield");
 }

 // Returns the static string name of this node's opcode (e.g. "Phi").
 const char* opName() {
   switch (op()) {
#define LIR_NAME_INS(name) \
 case Opcode::name:       \
   return #name;
     LIR_OPCODE_LIST(LIR_NAME_INS)
#undef LIR_NAME_INS
     default:
       MOZ_CRASH("Invalid op");
   }
 }

 // Hook for opcodes to add extra high level detail about what code will be
 // emitted for the op.
private:
 const char* extraName() const { return nullptr; }

public:
#ifdef JS_JITSPEW
 const char* getExtraName() const;
#endif

 Opcode op() const { return Opcode(op_); }

 // Every node is either an LPhi or an LInstruction.
 bool isInstruction() const { return op() != Opcode::Phi; }
 inline LInstruction* toInstruction();
 inline const LInstruction* toInstruction() const;

 // Returns the number of outputs of this instruction. If an output is
 // unallocated, it is an LDefinition, defining a virtual register.
 size_t numDefs() const { return numDefs_; }

 bool isCall() const { return isCall_; }

 // Does this call preserve the given register?
 // By default, it is assumed that all registers are clobbered by a call.
 inline bool isCallPreserved(AnyRegister reg) const;

 uint32_t id() const { return id_; }
 // Ids are assigned exactly once and must be non-zero (0 is the
 // "unassigned" sentinel).
 void setId(uint32_t id) {
   MOZ_ASSERT(!id_);
   MOZ_ASSERT(id);
   id_ = id;
 }
 void setMir(MDefinition* mir) { mir_ = mir; }
 MDefinition* mirRaw() const {
   /* Untyped MIR for this op. Prefer mir() methods in subclasses. */
   return mir_;
 }
 LBlock* block() const { return block_; }
 void setBlock(LBlock* block) { block_ = block; }

 // For an instruction which has a MUST_REUSE_INPUT output, whether that
 // output register will be restored to its original value when bailing out.
 inline bool recoversInput() const;

#ifdef JS_JITSPEW
 void dump(GenericPrinter& out);
 void dump();
 static void printName(GenericPrinter& out, Opcode op);
 void printName(GenericPrinter& out);
 void printOperands(GenericPrinter& out);
#endif

public:
 // Opcode testing and casts.
#define LIROP(name)                                      \
 bool is##name() const { return op() == Opcode::name; } \
 inline L##name* to##name();                            \
 inline const L##name* to##name() const;
 LIR_OPCODE_LIST(LIROP)
#undef LIROP

// Note: GenerateOpcodeFiles.py generates LIROpsGenerated.h based on this
// macro.
#define LIR_HEADER(opcode) \
 static constexpr LNode::Opcode classOpcode = LNode::Opcode::opcode;
};
    933 
    934 extern const char* const LIROpNames[];
    935 inline const char* LIRCodeName(LNode::Opcode op) {
    936  return LIROpNames[static_cast<size_t>(op)];
    937 }
    938 
class LInstruction : public LNode,
                    public TempObject,
                    public InlineListNode<LInstruction> {
 // This snapshot could be set after a ResumePoint.  It is used to restart
 // from the resume point pc.
 LSnapshot* snapshot_;

 // Structure capturing the set of stack slots and registers which are known
 // to hold either gcthings or Values.
 LSafepoint* safepoint_;

 // Move groups inserted by the register allocator around this instruction;
 // all null until assigned via the setters below.
 LMoveGroup* inputMoves_;
 LMoveGroup* fixReuseMoves_;
 LMoveGroup* movesAfter_;

protected:
 LInstruction(Opcode opcode, uint32_t numOperands, uint32_t numDefs,
              uint32_t numTemps)
     : LNode(opcode, numOperands, numDefs, numTemps),
       snapshot_(nullptr),
       safepoint_(nullptr),
       inputMoves_(nullptr),
       fixReuseMoves_(nullptr),
       movesAfter_(nullptr) {}

 void setIsCall() { isCall_ = true; }

public:
 inline LDefinition* getDef(size_t index);

 void setDef(size_t index, const LDefinition& def) { *getDef(index) = def; }

 // Operands live in the concrete subclass, after this object; locate them
 // with the word offset recorded by initOperandsOffset().
 LAllocation* getOperand(size_t index) const {
   MOZ_ASSERT(index < numOperands());
   MOZ_ASSERT(nonPhiOperandsOffset_ > 0);
   uintptr_t p = reinterpret_cast<uintptr_t>(this + 1) +
                 nonPhiOperandsOffset_ * sizeof(uintptr_t);
   return reinterpret_cast<LAllocation*>(p) + index;
 }
 void setOperand(size_t index, const LAllocation& a) {
   *getOperand(index) = a;
 }

 // Read a boxed Value operand: two pieces on NUNBOX32, one on PUNBOX64.
 LBoxAllocation getBoxOperand(size_t index) const {
#ifdef JS_NUNBOX32
   return LBoxAllocation(*getOperand(index + TYPE_INDEX),
                         *getOperand(index + PAYLOAD_INDEX));
#else
   return LBoxAllocation(*getOperand(index));
#endif
 }

 // Called once by the subclass constructor with offsetof(Subclass,
 // operands_); stored as a word count so it fits the 5-bit field.
 void initOperandsOffset(size_t offset) {
   MOZ_ASSERT(nonPhiOperandsOffset_ == 0);
   MOZ_ASSERT(offset >= sizeof(LInstruction));
   MOZ_ASSERT(((offset - sizeof(LInstruction)) % sizeof(uintptr_t)) == 0);
   offset = (offset - sizeof(LInstruction)) / sizeof(uintptr_t);
   nonPhiOperandsOffset_ = offset;
   MOZ_ASSERT(nonPhiOperandsOffset_ == offset, "offset must fit in bitfield");
 }

 void changePolicyOfReusedInputToAny(LDefinition* def) {
   // MUST_REUSE_INPUT is implemented by allocating an output register and
   // moving the input to it. Register hints are used to avoid unnecessary
   // moves. We give the input an LUse::ANY policy to avoid requiring a
   // register for the input.
   MOZ_ASSERT(def->policy() == LDefinition::MUST_REUSE_INPUT);
   LUse* inputUse = getOperand(def->getReusedInput())->toUse();
   MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
   MOZ_ASSERT(inputUse->usedAtStart());
   *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY,
                    /* usedAtStart = */ true);
 }

 // Returns information about temporary registers needed. Each temporary
 // register is an LDefinition with a fixed or virtual register and
 // either GENERAL, FLOAT32, or DOUBLE type.
 size_t numTemps() const { return numTemps_; }
 inline LDefinition* getTemp(size_t index);

 LSnapshot* snapshot() const { return snapshot_; }
 LSafepoint* safepoint() const { return safepoint_; }
 LMoveGroup* inputMoves() const { return inputMoves_; }
 void setInputMoves(LMoveGroup* moves) { inputMoves_ = moves; }
 LMoveGroup* fixReuseMoves() const { return fixReuseMoves_; }
 void setFixReuseMoves(LMoveGroup* moves) { fixReuseMoves_ = moves; }
 LMoveGroup* movesAfter() const { return movesAfter_; }
 void setMovesAfter(LMoveGroup* moves) { movesAfter_ = moves; }
 uint32_t numOperands() const { return nonPhiNumOperands_; }
 void assignSnapshot(LSnapshot* snapshot);
 void initSafepoint(TempAllocator& alloc);

 // InputIter iterates over all operands including snapshot inputs.
 // NonSnapshotInputIter does not include snapshot inputs.
 //
 // There can be many snapshot inputs and these are always KEEPALIVE uses or
 // constants. NonSnapshotInputIter can be used in places where we're not
 // interested in those.
 template <bool WithSnapshotUses>
 class InputIterImpl;
 using InputIter = InputIterImpl<true>;
 using NonSnapshotInputIter = InputIterImpl<false>;

 // Iterators for an instruction's outputs and temps. These skip BogusTemp
 // definitions.
 template <bool Temps>
 class DefIterImpl;
 using TempIter = DefIterImpl<true>;
 using OutputIter = DefIterImpl<false>;
};
   1049 
   1050 LInstruction* LNode::toInstruction() {
   1051  MOZ_ASSERT(isInstruction());
   1052  return static_cast<LInstruction*>(this);
   1053 }
   1054 
   1055 const LInstruction* LNode::toInstruction() const {
   1056  MOZ_ASSERT(isInstruction());
   1057  return static_cast<const LInstruction*>(this);
   1058 }
   1059 
// Base class for visitors over LIR elements. It remembers the instruction
// currently being visited, but only in TRACK_SNAPSHOTS builds; otherwise
// setElement is a no-op so call sites compile unchanged.
class LElementVisitor {
#ifdef TRACK_SNAPSHOTS
 LInstruction* ins_ = nullptr;
#endif

protected:
#ifdef TRACK_SNAPSHOTS
 // The instruction currently being visited (null before the first
 // setElement call).
 LInstruction* instruction() { return ins_; }

 void setElement(LInstruction* ins) { ins_ = ins; }
#else
 void setElement(LInstruction* ins) {}
#endif
};
   1074 
   1075 using LInstructionIterator = InlineList<LInstruction>::iterator;
   1076 using LInstructionReverseIterator = InlineList<LInstruction>::reverse_iterator;
   1077 
   1078 class MPhi;
   1079 
   1080 // Phi is a pseudo-instruction that emits no code, and is an annotation for the
   1081 // register allocator. Like its equivalent in MIR, phis are collected at the
   1082 // top of blocks and are meant to be executed in parallel, choosing the input
   1083 // corresponding to the predecessor taken in the control flow graph.
class LPhi final : public LNode {
 // Operand storage, allocated by the caller (one slot per predecessor);
 // LPhi does not own it.
 LAllocation* const inputs_;
 // The single definition produced by this phi.
 LDefinition def_;

public:
 LIR_HEADER(Phi)

 // `inputs` must hold room for one allocation per MPhi operand.
 LPhi(MPhi* ins, LAllocation* inputs)
     : LNode(classOpcode,
             /* nonPhiNumOperands = */ 0,
             /* numDefs = */ 1,
             /* numTemps = */ 0),
       inputs_(inputs) {
   setMir(ins);
 }

 // Phis have exactly one def (index 0).
 LDefinition* getDef(size_t index) {
   MOZ_ASSERT(index == 0);
   return &def_;
 }
 void setDef(size_t index, const LDefinition& def) {
   MOZ_ASSERT(index == 0);
   def_ = def;
 }
 // Operand count is delegated to the underlying MPhi rather than cached
 // here (it may not fit LNode's 6-bit operand field).
 size_t numOperands() const { return mir_->toPhi()->numOperands(); }
 LAllocation* getOperand(size_t index) {
   MOZ_ASSERT(index < numOperands());
   return &inputs_[index];
 }
 void setOperand(size_t index, const LAllocation& a) {
   MOZ_ASSERT(index < numOperands());
   inputs_[index] = a;
 }

 // Phis don't have temps, so calling numTemps/getTemp is pointless.
 size_t numTemps() const = delete;
 LDefinition* getTemp(size_t index) = delete;
};
   1122 
   1123 class LMoveGroup;
// LIR counterpart of an MBasicBlock: a list of phis followed by a list of
// instructions, plus the move groups and label code generation needs.
class LBlock {
 MBasicBlock* block_;
 FixedList<LPhi> phis_;
 InlineList<LInstruction> instructions_;
 // Lazily created by getEntryMoveGroup / getExitMoveGroup.
 LMoveGroup* entryMoveGroup_;
 LMoveGroup* exitMoveGroup_;
 Label label_;
 // If true, this block will be generated out of line.
 bool isOutOfLine_;

public:
 explicit LBlock(MBasicBlock* block);
 [[nodiscard]] bool init(TempAllocator& alloc);

 // Append an instruction and take note of its owning block.
 void add(LInstruction* ins) {
   ins->setBlock(this);
   instructions_.pushBack(ins);
 }
 size_t numPhis() const { return phis_.length(); }
 LPhi* getPhi(size_t index) { return &phis_[index]; }
 const LPhi* getPhi(size_t index) const { return &phis_[index]; }
 MBasicBlock* mir() const { return block_; }
 bool isOutOfLine() const { return isOutOfLine_; }
 LInstructionIterator begin() { return instructions_.begin(); }
 LInstructionIterator begin(LInstruction* at) {
   return instructions_.begin(at);
 }
 LInstructionIterator end() { return instructions_.end(); }
 LInstructionReverseIterator rbegin() { return instructions_.rbegin(); }
 LInstructionReverseIterator rbegin(LInstruction* at) {
   return instructions_.rbegin(at);
 }
 LInstructionReverseIterator rend() { return instructions_.rend(); }
 InlineList<LInstruction>& instructions() { return instructions_; }
 void insertAfter(LInstruction* at, LInstruction* ins) {
   instructions_.insertAfter(at, ins);
 }
 void insertBefore(LInstruction* at, LInstruction* ins) {
   instructions_.insertBefore(at, ins);
 }
 // Phis are numbered before instructions, so the first element with an id
 // is the first phi if any exist.
 const LNode* firstElementWithId() const {
   return !phis_.empty() ? static_cast<const LNode*>(getPhi(0))
                         : firstInstructionWithId();
 }
 uint32_t firstId() const { return firstElementWithId()->id(); }
 uint32_t lastId() const { return lastInstructionWithId()->id(); }
 const LInstruction* firstInstructionWithId() const;
 const LInstruction* lastInstructionWithId() const {
   const LInstruction* last = *instructions_.rbegin();
   MOZ_ASSERT(last->id());
   // The last instruction is a control flow instruction which does not have
   // any output.
   MOZ_ASSERT(last->numDefs() == 0);
   return last;
 }

 // Return the label to branch to when branching to this block.
 Label* label() {
   MOZ_ASSERT(!isTrivial());
   return &label_;
 }

 LMoveGroup* getEntryMoveGroup(TempAllocator& alloc);
 LMoveGroup* getExitMoveGroup(TempAllocator& alloc);

 // Test whether this basic block is empty except for a simple goto, and
 // which is not forming a loop. No code will be emitted for such blocks.
 bool isTrivial() { return begin()->isGoto() && !mir()->isLoopHeader(); }

 // Test whether this basic block is a sequence of MoveGroups followed by a
 // simple goto, and is not a loop header.  If so return the target of the
 // jump, otherwise return nullptr.
 LBlock* isMoveGroupsThenGoto();

#ifdef JS_JITSPEW
 void dump(GenericPrinter& out);
 void dump();
#endif
};
   1203 
namespace details {
// Stores an instruction's defs and temps in a single inline array:
// defs occupy indices [0, Defs) and temps [Defs, Defs + Temps). The
// offsetOf* helpers below let LInstruction::getDef/getTemp reach this
// array without knowing the concrete subclass.
template <size_t Defs, size_t Temps>
class LInstructionFixedDefsTempsHelper : public LInstruction {
 mozilla::Array<LDefinition, Defs + Temps> defsAndTemps_;

protected:
 LInstructionFixedDefsTempsHelper(Opcode opcode, uint32_t numOperands)
     : LInstruction(opcode, numOperands, Defs, Temps) {}

public:
 // Override the methods in LInstruction with more optimized versions
 // for when we know the exact instruction type.
 LDefinition* getDef(size_t index) {
   MOZ_ASSERT(index < Defs);
   return &defsAndTemps_[index];
 }
 LDefinition* getTemp(size_t index) {
   MOZ_ASSERT(index < Temps);
   return &defsAndTemps_[Defs + index];
 }
 // On 32-bit targets an int64 temp spans two adjacent LDefinitions.
 LInt64Definition getInt64Temp(size_t index) {
   MOZ_ASSERT(index + INT64_PIECES <= Temps);
#if JS_BITS_PER_WORD == 32
   return LInt64Definition(defsAndTemps_[Defs + index + INT64HIGH_INDEX],
                           defsAndTemps_[Defs + index + INT64LOW_INDEX]);
#else
   return LInt64Definition(defsAndTemps_[Defs + index]);
#endif
 }

 void setDef(size_t index, const LDefinition& def) {
   MOZ_ASSERT(index < Defs);
   defsAndTemps_[index] = def;
 }
 void setTemp(size_t index, const LDefinition& a) {
   MOZ_ASSERT(index < Temps);
   defsAndTemps_[Defs + index] = a;
 }
 void setInt64Temp(size_t index, const LInt64Definition& a) {
#if JS_BITS_PER_WORD == 32
   setTemp(index, a.low());
   setTemp(index + 1, a.high());
#else
   setTemp(index, a.value());
#endif
 }

 // Default accessor, assuming a single output.
 const LDefinition* output() {
   MOZ_ASSERT(numDefs() == 1);
   return getDef(0);
 }
 // Byte offsets of defs/temps relative to the instruction start; computed
 // on the <0, 0> instantiation since the array offset is the same for all
 // Defs/Temps values.
 static size_t offsetOfDef(size_t index) {
   using T = LInstructionFixedDefsTempsHelper<0, 0>;
   return offsetof(T, defsAndTemps_) + index * sizeof(LDefinition);
 }
 static size_t offsetOfTemp(uint32_t numDefs, uint32_t index) {
   using T = LInstructionFixedDefsTempsHelper<0, 0>;
   return offsetof(T, defsAndTemps_) + (numDefs + index) * sizeof(LDefinition);
 }
};
}  // namespace details
   1266 
// Generic def accessor: reaches into the subclass's defsAndTemps_ array via
// the fixed byte offset computed on the <0, 0> helper instantiation.
inline LDefinition* LInstruction::getDef(size_t index) {
 MOZ_ASSERT(index < numDefs());
 using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
 uint8_t* p = reinterpret_cast<uint8_t*>(this) + T::offsetOfDef(index);
 return reinterpret_cast<LDefinition*>(p);
}
   1273 
// Generic temp accessor: temps follow the defs in the subclass's
// defsAndTemps_ array, so the offset is shifted by numDefs().
inline LDefinition* LInstruction::getTemp(size_t index) {
 MOZ_ASSERT(index < numTemps());
 using T = details::LInstructionFixedDefsTempsHelper<0, 0>;
 uint8_t* p =
     reinterpret_cast<uint8_t*>(this) + T::offsetOfTemp(numDefs(), index);
 return reinterpret_cast<LDefinition*>(p);
}
   1281 
// Helper for instructions with a statically known number of defs, operands,
// and temps. Operands are stored inline, directly after the base object, so
// the constructor registers their offset with the generic LInstruction
// accessors.
template <size_t Defs, size_t Operands, size_t Temps>
class LInstructionHelper
    : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
 MOZ_NO_UNIQUE_ADDRESS mozilla::Array<LAllocation, Operands> operands_;

protected:
 explicit LInstructionHelper(LNode::Opcode opcode)
     : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
                                                              Operands) {
   static_assert(
       Operands == 0 || sizeof(operands_) == Operands * sizeof(LAllocation),
       "mozilla::Array should not contain other fields");
   if (Operands > 0) {
     using T = LInstructionHelper<Defs, Operands, Temps>;
     this->initOperandsOffset(offsetof(T, operands_));
   }
 }

public:
 // Override the methods in LInstruction with more optimized versions
 // for when we know the exact instruction type.
 LAllocation* getOperand(size_t index) { return &operands_[index]; }
 const LAllocation* getOperand(size_t index) const {
   return &operands_[index];
 }
 void setOperand(size_t index, const LAllocation& a) { operands_[index] = a; }
 // Boxed Value operands occupy two slots on NUNBOX32, one on PUNBOX64.
 LBoxAllocation getBoxOperand(size_t index) const {
#ifdef JS_NUNBOX32
   return LBoxAllocation(operands_[index + TYPE_INDEX],
                         operands_[index + PAYLOAD_INDEX]);
#else
   return LBoxAllocation(operands_[index]);
#endif
 }
 void setBoxOperand(size_t index, const LBoxAllocation& alloc) {
#ifdef JS_NUNBOX32
   operands_[index + TYPE_INDEX] = alloc.type();
   operands_[index + PAYLOAD_INDEX] = alloc.payload();
#else
   operands_[index] = alloc.value();
#endif
 }
 // Int64 operands likewise take two slots on 32-bit targets.
 void setInt64Operand(size_t index, const LInt64Allocation& alloc) {
#if JS_BITS_PER_WORD == 32
   operands_[index + INT64LOW_INDEX] = alloc.low();
   operands_[index + INT64HIGH_INDEX] = alloc.high();
#else
   operands_[index] = alloc.value();
#endif
 }
 LInt64Allocation getInt64Operand(size_t offset) const {
#if JS_BITS_PER_WORD == 32
   return LInt64Allocation(operands_[offset + INT64HIGH_INDEX],
                           operands_[offset + INT64LOW_INDEX]);
#else
   return LInt64Allocation(operands_[offset]);
#endif
 }
};
   1341 
// Helper for instructions whose operand count is only known at lowering
// time. Operand storage is allocated externally; only defs and temps are
// inline. The caller is expected to invoke initOperandsOffset separately.
template <size_t Defs, size_t Temps>
class LVariadicInstruction
    : public details::LInstructionFixedDefsTempsHelper<Defs, Temps> {
protected:
 LVariadicInstruction(LNode::Opcode opcode, size_t numOperands)
     : details::LInstructionFixedDefsTempsHelper<Defs, Temps>(opcode,
                                                              numOperands) {}

public:
 // Store a boxed Value operand (two pieces on NUNBOX32, one on PUNBOX64).
 void setBoxOperand(size_t index, const LBoxAllocation& a) {
#ifdef JS_NUNBOX32
   this->setOperand(index + TYPE_INDEX, a.type());
   this->setOperand(index + PAYLOAD_INDEX, a.payload());
#else
   this->setOperand(index, a.value());
#endif
 }
};
   1360 
// Convenience base for call-like instructions: identical to
// LInstructionHelper except that the call flag is set, which tells the
// register allocator that all registers are clobbered.
template <size_t Defs, size_t Operands, size_t Temps>
class LCallInstructionHelper
    : public LInstructionHelper<Defs, Operands, Temps> {
protected:
 explicit LCallInstructionHelper(LNode::Opcode opcode)
     : LInstructionHelper<Defs, Operands, Temps>(opcode) {
   this->setIsCall();
 }
};
   1370 
   1371 // Base class for control instructions (goto, branch, etc.)
template <size_t Succs, size_t Operands, size_t Temps>
class LControlInstructionHelper
    : public LInstructionHelper<0, Operands, Temps> {
 // Successor blocks; control instructions define no values (0 defs).
 MOZ_NO_UNIQUE_ADDRESS mozilla::Array<MBasicBlock*, Succs> successors_;

protected:
 explicit LControlInstructionHelper(LNode::Opcode opcode)
     : LInstructionHelper<0, Operands, Temps>(opcode) {}

public:
 size_t numSuccessors() const { return Succs; }
 MBasicBlock* getSuccessor(size_t i) const { return successors_[i]; }

 void setSuccessor(size_t i, MBasicBlock* successor) {
   successors_[i] = successor;
 }
};
   1389 
// Describes how to rebuild the MIR stack frames for a bailout: a dependency-
// ordered list of MIR nodes ending with the innermost resume point.
class LRecoverInfo : public TempObject {
public:
 using Instructions = Vector<MNode*, 2, JitAllocPolicy>;

private:
 // List of instructions needed to recover the stack frames.
 // Outer frames are stored before inner frames.
 Instructions instructions_;

 // Cached offset where this resume point is encoded.
 RecoverOffset recoverOffset_;

 // Whether this LRecoverInfo has any side-effect associated with it.
 bool hasSideEffects_ = false;

 explicit LRecoverInfo(TempAllocator& alloc);
 [[nodiscard]] bool init(MResumePoint* mir);

 // Fill the instruction vector such as all instructions needed for the
 // recovery are pushed before the current instruction.
 template <typename Node>
 [[nodiscard]] bool appendOperands(Node* ins);
 [[nodiscard]] bool appendDefinition(MDefinition* def);
 [[nodiscard]] bool appendResumePoint(MResumePoint* rp);

public:
 static LRecoverInfo* New(MIRGenerator* gen, MResumePoint* mir);

 // Resume point of the inner most function.
 MResumePoint* mir() const { return instructions_.back()->toResumePoint(); }
 RecoverOffset recoverOffset() const { return recoverOffset_; }
 // The offset is write-once; it must still be INVALID_RECOVER_OFFSET.
 void setRecoverOffset(RecoverOffset offset) {
   MOZ_ASSERT(recoverOffset_ == INVALID_RECOVER_OFFSET);
   recoverOffset_ = offset;
 }

 MNode** begin() { return instructions_.begin(); }
 MNode** end() { return instructions_.end(); }
 size_t numInstructions() const { return instructions_.length(); }
 bool hasSideEffects() { return hasSideEffects_; }

 // Flat iterator over every operand of every recorded node.
 // NOTE: operator bool returns true when the iterator is EXHAUSTED, so
 // loops are written as `for (OperandIter it(ri); !it; ++it)`.
 class OperandIter {
  private:
   MNode** it_;
   MNode** end_;
   size_t op_;
   size_t opEnd_;
   MResumePoint* rp_;
   MNode* node_;

  public:
   explicit OperandIter(LRecoverInfo* recoverInfo)
       : it_(recoverInfo->begin()),
         end_(recoverInfo->end()),
         op_(0),
         opEnd_(0),
         rp_(nullptr),
         node_(nullptr) {
     settle();
   }

   // Position on the next node that has at least one operand. Assumes such
   // a node exists before end_ (no bounds check on the skip loop).
   void settle() {
     opEnd_ = (*it_)->numOperands();
     while (opEnd_ == 0) {
       ++it_;
       op_ = 0;
       opEnd_ = (*it_)->numOperands();
     }
     node_ = *it_;
     if (node_->isResumePoint()) {
       rp_ = node_->toResumePoint();
     }
   }

   MDefinition* operator*() {
     if (rp_) {  // de-virtualize MResumePoint::getOperand calls.
       return rp_->getOperand(op_);
     }
     return node_->getOperand(op_);
   }
   MDefinition* operator->() {
     if (rp_) {  // de-virtualize MResumePoint::getOperand calls.
       return rp_->getOperand(op_);
     }
     return node_->getOperand(op_);
   }

   OperandIter& operator++() {
     ++op_;
     if (op_ != opEnd_) {
       return *this;
     }
     op_ = 0;
     ++it_;
     node_ = rp_ = nullptr;
     // `!*this` means "not yet at the end": re-settle on the next node.
     if (!*this) {
       settle();
     }
     return *this;
   }

   // True once iteration is complete (inverted relative to the usual
   // iterator convention; see class comment).
   explicit operator bool() const { return it_ == end_; }

#ifdef DEBUG
   bool canOptimizeOutIfUnused();
#endif
 };
};
   1498 
   1499 // An LSnapshot is the reflection of an MResumePoint in LIR. Unlike
   1500 // MResumePoints, they cannot be shared, as they are filled in by the register
   1501 // allocator in order to capture the precise low-level stack state in between an
   1502 // instruction's input and output. During code generation, LSnapshots are
   1503 // compressed and saved in the compiled script.
class LSnapshot : public TempObject {
private:
 // One allocation per Value piece: numSlots_ entries total, BOX_PIECES
 // entries per JS Value slot.
 LAllocation* slots_;
 LRecoverInfo* recoverInfo_;
 // Offset into the compressed snapshot stream; INVALID_SNAPSHOT_OFFSET
 // until encoded.
 SnapshotOffset snapshotOffset_;
 uint32_t numSlots_;
 BailoutKind bailoutKind_;

 LSnapshot(LRecoverInfo* recover, BailoutKind kind);
 [[nodiscard]] bool init(MIRGenerator* gen);

public:
 static LSnapshot* New(MIRGenerator* gen, LRecoverInfo* recover,
                       BailoutKind kind);

 // Raw entry count vs. Value-slot count (BOX_PIECES entries per slot).
 size_t numEntries() const { return numSlots_; }
 size_t numSlots() const { return numSlots_ / BOX_PIECES; }
 // The payload is the last piece of a slot's BOX_PIECES group.
 LAllocation* payloadOfSlot(size_t i) {
   MOZ_ASSERT(i < numSlots());
   size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 1);
   return getEntry(entryIndex);
 }
#ifdef JS_NUNBOX32
 // On NUNBOX32 the type tag is the piece before the payload.
 LAllocation* typeOfSlot(size_t i) {
   MOZ_ASSERT(i < numSlots());
   size_t entryIndex = (i * BOX_PIECES) + (BOX_PIECES - 2);
   return getEntry(entryIndex);
 }
#endif
 LAllocation* getEntry(size_t i) {
   MOZ_ASSERT(i < numSlots_);
   return &slots_[i];
 }
 void setEntry(size_t i, const LAllocation& alloc) {
   MOZ_ASSERT(i < numSlots_);
   slots_[i] = alloc;
 }
 LRecoverInfo* recoverInfo() const { return recoverInfo_; }
 MResumePoint* mir() const { return recoverInfo()->mir(); }
 SnapshotOffset snapshotOffset() const { return snapshotOffset_; }
 // Write-once: must still be INVALID_SNAPSHOT_OFFSET when assigned.
 void setSnapshotOffset(SnapshotOffset offset) {
   MOZ_ASSERT(snapshotOffset_ == INVALID_SNAPSHOT_OFFSET);
   snapshotOffset_ = offset;
 }
 BailoutKind bailoutKind() const { return bailoutKind_; }
 void rewriteRecoveredInput(LUse input);
};
   1551 
// Compact (stack-or-argument, byte-offset) pair recorded in safepoint slot
// lists; packed into a single uint32_t via bitfields.
struct SafepointSlotEntry {
 // Flag indicating whether this is a slot in the stack or argument space.
 uint32_t stack : 1;

 // Byte offset of the slot, as in LStackSlot or LArgument.
 uint32_t slot : 31;

 SafepointSlotEntry() : stack(0), slot(0) {}
 SafepointSlotEntry(bool stack, uint32_t slot) : stack(stack), slot(slot) {}
 // Derive the entry from an allocation: stack slots vs. argument slots.
 explicit SafepointSlotEntry(const LAllocation* a)
     : stack(a->isStackSlot()), slot(a->memorySlot()) {}
};
   1564 
   1565 // Used for the type or payload half of a JS Value on 32-bit platforms.
class SafepointNunboxEntry {
 static constexpr size_t VregBits = 31;
 // Whether this entry is the type tag (vs. the payload) of the Value.
 uint32_t isType_ : 1;
 // Virtual register holding this half of the Value.
 uint32_t vreg_ : VregBits;
 LAllocation alloc_;

 static_assert(MAX_VIRTUAL_REGISTERS <= (uint32_t(1) << VregBits) - 1);

public:
 // The allocation must be a general register or a memory location; float
 // registers cannot hold Value pieces.
 SafepointNunboxEntry(bool isType, uint32_t vreg, LAllocation alloc)
     : isType_(isType), vreg_(vreg), alloc_(alloc) {
   MOZ_ASSERT(alloc.isGeneralReg() || alloc.isMemory());
 }
 bool isType() const { return isType_; }
 uint32_t vreg() const { return vreg_; }
 LAllocation alloc() const { return alloc_; }
};
   1583 
// Distinguishes who is responsible for spilling live registers at a wasm
// safepoint (see LSafepoint below).
enum class WasmSafepointKind : uint8_t {
 // For wasm call instructions (isCall() == true) where registers are spilled
 // by register allocation.
 LirCall,
 // For wasm instructions (isCall() == false) which will spill/restore live
 // registers manually in codegen.
 CodegenCall,
 // For resumable wasm traps where registers will be spilled by the trap
 // handler.
 Trap,
 // For stack switch call.
 StackSwitch,
};
   1597 
// An LSafepoint records, for one LIR instruction, the GC-relevant state that
// is live at the start of that instruction: which registers are live across
// an OOL call, and which registers/stack slots hold gcthing pointers, boxed
// Values, interior slots/elements pointers, or wasm references. The register
// allocator populates it; codegen later encodes it into the safepoint stream
// (see setOffset()/offset()).
class LSafepoint : public TempObject {
  using SlotEntry = SafepointSlotEntry;
  using NunboxEntry = SafepointNunboxEntry;

 public:
  using SlotList = Vector<SlotEntry, 0, JitAllocPolicy>;
  using NunboxList = Vector<NunboxEntry, 0, JitAllocPolicy>;

 private:
  // The information in a safepoint describes the registers and gc related
  // values that are live at the start of the associated instruction.

  // The set of registers which are live at an OOL call made within the
  // instruction. This includes any registers for inputs which are not
  // use-at-start, any registers for temps, and any registers live after the
  // call except outputs of the instruction.
  //
  // For call instructions, the live regs are empty. Call instructions may
  // have register inputs or temporaries, which will *not* be in the live
  // registers: if passed to the call, the values passed will be marked via
  // TraceJitExitFrame, and no registers can be live after the instruction
  // except its outputs.
  LiveRegisterSet liveRegs_;

  // The subset of liveRegs which contains gcthing pointers.
  LiveGeneralRegisterSet gcRegs_;

#ifdef CHECK_OSIPOINT_REGISTERS
  // Clobbered regs of the current instruction. This set is never written to
  // the safepoint; it's only used by assertions during compilation.
  LiveRegisterSet clobberedRegs_;
#endif

  // Offset to a position in the safepoint stream, or
  // INVALID_SAFEPOINT_OFFSET.
  uint32_t safepointOffset_;

  // Assembler buffer displacement to OSI point's call location.
  uint32_t osiCallPointOffset_;

  // List of slots which have gcthing pointers.
  SlotList gcSlots_;

#ifdef JS_NUNBOX32
  // List of registers (in liveRegs) and slots which contain pieces of Values.
  NunboxList nunboxParts_;
#elif JS_PUNBOX64
  // List of slots which have Values.
  SlotList valueSlots_;

  // The subset of liveRegs which have Values.
  LiveGeneralRegisterSet valueRegs_;
#endif

  // The subset of liveRegs which contains pointers to slots/elements.
  LiveGeneralRegisterSet slotsOrElementsRegs_;
  // List of slots which have slots/elements pointers.
  SlotList slotsOrElementsSlots_;

  // The subset of liveRegs which contains wasm::AnyRef's.
  LiveGeneralRegisterSet wasmAnyRefRegs_;
  // List of slots which have wasm::AnyRef's.
  SlotList wasmAnyRefSlots_;

  // The subset of liveRegs which contains wasm struct data (OOL) pointers.
  LiveGeneralRegisterSet wasmStructDataRegs_;
  // List of slots which have wasm struct data (OOL) pointers.
  SlotList wasmStructDataSlots_;

  // The subset of liveRegs which contains wasm array data (IL or OOL) pointers.
  LiveGeneralRegisterSet wasmArrayDataRegs_;
  // List of slots which have wasm array data (IL or OOL) pointers.
  SlotList wasmArrayDataSlots_;

  // Wasm only: with what kind of instruction is this LSafepoint associated?
  WasmSafepointKind wasmSafepointKind_;

  // Wasm only: what is the value of masm.framePushed() that corresponds to
  // the lowest-addressed word covered by the StackMap that we will generate
  // from this LSafepoint?  This depends on the instruction:
  //
  // WasmSafepointKind::LirCall:
  //    masm.framePushed() - StackArgAreaSizeUnaligned(arg types for the call),
  //    because the map does not include the outgoing args themselves, but
  //    it does cover any and all alignment space above them.
  //
  // WasmSafepointKind::CodegenCall and WasmSafepointKind::Trap:
  //    masm.framePushed() unmodified. Note that when constructing the
  //    StackMap we will add entries below this point to take account of
  //    registers dumped on the stack.
  uint32_t framePushedAtStackMapBase_;

 public:
  // Assert that every register recorded in one of the typed register subsets
  // is also present in liveRegs_. (On NUNBOX32 platforms Value pieces are
  // tracked via nunboxParts_, so there is no valueRegs subset to check.)
  void assertInvariants() {
    // Every register in valueRegs, gcRegs, wasmAnyRefRegs, wasmStructDataRegs
    // and wasmArrayDataRegs should also be in liveRegs.
#ifndef JS_NUNBOX32
    MOZ_ASSERT((valueRegs().bits() & ~liveRegs().gprs().bits()) == 0);
#endif
    MOZ_ASSERT((gcRegs().bits() & ~liveRegs().gprs().bits()) == 0);
    MOZ_ASSERT((wasmAnyRefRegs().bits() & ~liveRegs().gprs().bits()) == 0);
    MOZ_ASSERT((wasmStructDataRegs().bits() & ~liveRegs().gprs().bits()) == 0);
    MOZ_ASSERT((wasmArrayDataRegs().bits() & ~liveRegs().gprs().bits()) == 0);
  }

  explicit LSafepoint(TempAllocator& alloc)
      : safepointOffset_(INVALID_SAFEPOINT_OFFSET),
        osiCallPointOffset_(0),
        gcSlots_(alloc),
#ifdef JS_NUNBOX32
        nunboxParts_(alloc),
#else
        valueSlots_(alloc),
#endif
        slotsOrElementsSlots_(alloc),
        wasmAnyRefSlots_(alloc),
        wasmStructDataSlots_(alloc),
        wasmArrayDataSlots_(alloc),
        wasmSafepointKind_(WasmSafepointKind::LirCall),
        framePushedAtStackMapBase_(0) {
    assertInvariants();
  }
  // Mark `reg` as live across this instruction's OOL call.
  void addLiveRegister(AnyRegister reg) {
    liveRegs_.addUnchecked(reg);
    assertInvariants();
  }
  const LiveRegisterSet& liveRegs() const { return liveRegs_; }
#ifdef CHECK_OSIPOINT_REGISTERS
  // Debug-only: record a register clobbered by this instruction. Used only
  // for compile-time assertions; never encoded into the safepoint.
  void addClobberedRegister(AnyRegister reg) {
    clobberedRegs_.addUnchecked(reg);
    assertInvariants();
  }
  const LiveRegisterSet& clobberedRegs() const { return clobberedRegs_; }
#endif
  LiveGeneralRegisterSet gcRegs() const { return gcRegs_; }
  // Record a memory slot holding a gcthing pointer; `stack` distinguishes
  // stack slots from other (argument) slots. Returns false if the append
  // failed (OOM).
  [[nodiscard]] bool addGcSlot(bool stack, uint32_t slot) {
    bool result = gcSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  SlotList& gcSlots() { return gcSlots_; }

  SlotList& slotsOrElementsSlots() { return slotsOrElementsSlots_; }
  LiveGeneralRegisterSet slotsOrElementsRegs() const {
    return slotsOrElementsRegs_;
  }
  [[nodiscard]] bool addSlotsOrElementsSlot(bool stack, uint32_t slot) {
    bool result = slotsOrElementsSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  // Record a slots/elements interior pointer living in `alloc`, which must be
  // either a memory slot or a general register.
  [[nodiscard]] bool addSlotsOrElementsPointer(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addSlotsOrElementsSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    MOZ_ASSERT(alloc.isGeneralReg());
    slotsOrElementsRegs_.addUnchecked(alloc.toGeneralReg()->reg());
    assertInvariants();
    return true;
  }

  SlotList& wasmStructDataSlots() { return wasmStructDataSlots_; }
  LiveGeneralRegisterSet wasmStructDataRegs() const {
    return wasmStructDataRegs_;
  }
  [[nodiscard]] bool addWasmStructDataSlot(bool stack, uint32_t slot) {
    bool result = wasmStructDataSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  // Record a wasm struct data (OOL) pointer living in `alloc` (memory slot or
  // general register).
  [[nodiscard]] bool addWasmStructDataPointer(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addWasmStructDataSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    MOZ_ASSERT(alloc.isGeneralReg());
    wasmStructDataRegs_.addUnchecked(alloc.toGeneralReg()->reg());
    assertInvariants();
    return true;
  }

  SlotList& wasmArrayDataSlots() { return wasmArrayDataSlots_; }
  LiveGeneralRegisterSet wasmArrayDataRegs() const {
    return wasmArrayDataRegs_;
  }
  [[nodiscard]] bool addWasmArrayDataSlot(bool stack, uint32_t slot) {
    bool result = wasmArrayDataSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  // Record a wasm array data pointer living in `alloc` (memory slot or
  // general register).
  [[nodiscard]] bool addWasmArrayDataPointer(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addWasmArrayDataSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    MOZ_ASSERT(alloc.isGeneralReg());
    wasmArrayDataRegs_.addUnchecked(alloc.toGeneralReg()->reg());
    assertInvariants();
    return true;
  }

  // Query whether `alloc` (register or memory slot) has already been recorded
  // as a slots/elements pointer.
  bool hasSlotsOrElementsPointer(LAllocation alloc) const {
    if (alloc.isGeneralReg()) {
      return slotsOrElementsRegs().has(alloc.toGeneralReg()->reg());
    }
    for (size_t i = 0; i < slotsOrElementsSlots_.length(); i++) {
      const SlotEntry& entry = slotsOrElementsSlots_[i];
      if (entry.stack == alloc.isStackSlot() &&
          entry.slot == alloc.memorySlot()) {
        return true;
      }
    }
    return false;
  }

  // Record a gcthing pointer living in `alloc` (memory slot or general
  // register).
  [[nodiscard]] bool addGcPointer(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addGcSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    MOZ_ASSERT(alloc.isGeneralReg());
    gcRegs_.addUnchecked(alloc.toGeneralReg()->reg());
    assertInvariants();
    return true;
  }

  // Query whether `alloc` has already been recorded as a gcthing pointer.
  bool hasGcPointer(LAllocation alloc) const {
    if (alloc.isGeneralReg()) {
      return gcRegs().has(alloc.toGeneralReg()->reg());
    }
    MOZ_ASSERT(alloc.isMemory());
    for (size_t i = 0; i < gcSlots_.length(); i++) {
      if (gcSlots_[i].stack == alloc.isStackSlot() &&
          gcSlots_[i].slot == alloc.memorySlot()) {
        return true;
      }
    }
    return false;
  }

  LiveGeneralRegisterSet wasmAnyRefRegs() const { return wasmAnyRefRegs_; }

  [[nodiscard]] bool addWasmAnyRefSlot(bool stack, uint32_t slot) {
    bool result = wasmAnyRefSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  SlotList& wasmAnyRefSlots() { return wasmAnyRefSlots_; }

  // Record a wasm::AnyRef living in `alloc` (memory slot or general
  // register).
  [[nodiscard]] bool addWasmAnyRef(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addWasmAnyRefSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    MOZ_ASSERT(alloc.isGeneralReg());
    wasmAnyRefRegs_.addUnchecked(alloc.toGeneralReg()->reg());
    assertInvariants();
    return true;
  }
  // Query whether `alloc` has already been recorded as a wasm::AnyRef.
  bool hasWasmAnyRef(LAllocation alloc) const {
    if (alloc.isGeneralReg()) {
      return wasmAnyRefRegs().has(alloc.toGeneralReg()->reg());
    }
    MOZ_ASSERT(alloc.isMemory());
    for (size_t i = 0; i < wasmAnyRefSlots_.length(); i++) {
      if (wasmAnyRefSlots_[i].stack == alloc.isStackSlot() &&
          wasmAnyRefSlots_[i].slot == alloc.memorySlot()) {
        return true;
      }
    }
    return false;
  }

  // Return true if all GC-managed pointers from `alloc` are recorded in this
  // safepoint.
  bool hasAllWasmAnyRefsFromStackArea(LAllocation alloc) const {
    for (LStackArea::ResultIterator iter = alloc.toStackArea()->results(); iter;
         iter.next()) {
      if (iter.isWasmAnyRef() && !hasWasmAnyRef(iter.alloc())) {
        return false;
      }
    }
    return true;
  }

#ifdef JS_NUNBOX32
  // Record one half of a split Value: `isType` selects the type vs. payload
  // piece of virtual register `vreg`, stored in `alloc`.
  [[nodiscard]] bool addNunboxPart(bool isType, uint32_t vreg,
                                   LAllocation alloc) {
    bool result = nunboxParts_.emplaceBack(isType, vreg, alloc);
    if (result) {
      assertInvariants();
    }
    return result;
  }

#  ifdef DEBUG
  bool hasNunboxPart(bool isType, LAllocation alloc) const {
    for (auto entry : nunboxParts_) {
      if (entry.alloc() == alloc && entry.isType() == isType) {
        return true;
      }
    }
    return false;
  }
#  endif

  NunboxList& nunboxParts() { return nunboxParts_; }

#elif JS_PUNBOX64
  // Record a memory slot holding a boxed Value.
  [[nodiscard]] bool addValueSlot(bool stack, uint32_t slot) {
    bool result = valueSlots_.append(SlotEntry(stack, slot));
    if (result) {
      assertInvariants();
    }
    return result;
  }
  SlotList& valueSlots() { return valueSlots_; }

  bool hasValueSlot(bool stack, uint32_t slot) const {
    for (size_t i = 0; i < valueSlots_.length(); i++) {
      if (valueSlots_[i].stack == stack && valueSlots_[i].slot == slot) {
        return true;
      }
    }
    return false;
  }

  LiveGeneralRegisterSet valueRegs() const { return valueRegs_; }

  // Record a boxed Value living in `alloc` (memory slot or general register).
  [[nodiscard]] bool addBoxedValue(LAllocation alloc) {
    if (alloc.isMemory()) {
      return addValueSlot(alloc.isStackSlot(), alloc.memorySlot());
    }
    MOZ_ASSERT(alloc.isGeneralReg());
    valueRegs_.addUnchecked(alloc.toGeneralReg()->reg());
    assertInvariants();
    return true;
  }

  bool hasBoxedValue(LAllocation alloc) const {
    if (alloc.isGeneralReg()) {
      return valueRegs().has(alloc.toGeneralReg()->reg());
    }
    return hasValueSlot(alloc.isStackSlot(), alloc.memorySlot());
  }

#endif  // JS_PUNBOX64

  [[nodiscard]] bool addGCAllocation(uint32_t vregId, LDefinition* def,
                                     LAllocation a);

  // True once this safepoint has been written to the safepoint stream.
  bool encoded() const { return safepointOffset_ != INVALID_SAFEPOINT_OFFSET; }
  uint32_t offset() const {
    MOZ_ASSERT(encoded());
    return safepointOffset_;
  }
  void setOffset(uint32_t offset) { safepointOffset_ = offset; }
  uint32_t osiReturnPointOffset() const {
    // In general, pointer arithmetic on code is bad, but here it is
    // deliberate: the return point is found by stepping the fixed size of a
    // near call past the call location. Skipping over constant pools (as
    // generic next-instruction logic would do) would yield the wrong address.
    return osiCallPointOffset_ + Assembler::PatchWrite_NearCallSize();
  }
  uint32_t osiCallPointOffset() const { return osiCallPointOffset_; }
  // Record the OSI call location; may only be set once.
  void setOsiCallPointOffset(uint32_t osiCallPointOffset) {
    MOZ_ASSERT(!osiCallPointOffset_);
    osiCallPointOffset_ = osiCallPointOffset;
  }

  WasmSafepointKind wasmSafepointKind() const { return wasmSafepointKind_; }
  void setWasmSafepointKind(WasmSafepointKind kind) {
    wasmSafepointKind_ = kind;
  }

  // See comment on framePushedAtStackMapBase_.
  uint32_t framePushedAtStackMapBase() const {
    return framePushedAtStackMapBase_;
  }
  // May only be set once (the default 0 is treated as "unset").
  void setFramePushedAtStackMapBase(uint32_t n) {
    MOZ_ASSERT(framePushedAtStackMapBase_ == 0);
    framePushedAtStackMapBase_ = n;
  }
};
   1987 
// Allocations used when lowering a wasm "ref is subtype" check: the operand
// holding the supertype descriptor (superSTV — presumably the super type
// vector; confirm against the wasm lowering code) plus two scratch
// definitions for the check itself.
struct WasmRefIsSubtypeDefs {
  LAllocation superSTV;
  LDefinition scratch1;
  LDefinition scratch2;
};
   1993 
   1994 template <bool WithSnapshotUses>
   1995 class LInstruction::InputIterImpl {
   1996 private:
   1997  LInstruction& ins_;
   1998  size_t idx_ = 0;
   1999  bool snapshot_ = false;
   2000 
   2001  void handleOperandsEnd() {
   2002    // Iterate on the snapshot when iteration over all operands is done.
   2003    if constexpr (WithSnapshotUses) {
   2004      if (!snapshot_ && idx_ == ins_.numOperands() && ins_.snapshot()) {
   2005        idx_ = 0;
   2006        snapshot_ = true;
   2007      }
   2008    }
   2009  }
   2010 
   2011 public:
   2012  explicit InputIterImpl(LInstruction& ins) : ins_(ins) { handleOperandsEnd(); }
   2013 
   2014  bool more() const {
   2015    if (WithSnapshotUses && snapshot_) {
   2016      return idx_ < ins_.snapshot()->numEntries();
   2017    }
   2018    if (idx_ < ins_.numOperands()) {
   2019      return true;
   2020    }
   2021    if (WithSnapshotUses && ins_.snapshot() && ins_.snapshot()->numEntries()) {
   2022      return true;
   2023    }
   2024    return false;
   2025  }
   2026 
   2027  bool isSnapshotInput() const { return snapshot_; }
   2028 
   2029  void next() {
   2030    MOZ_ASSERT(more());
   2031    idx_++;
   2032    handleOperandsEnd();
   2033  }
   2034 
   2035  void replace(const LAllocation& alloc) {
   2036    if (WithSnapshotUses && snapshot_) {
   2037      ins_.snapshot()->setEntry(idx_, alloc);
   2038    } else {
   2039      ins_.setOperand(idx_, alloc);
   2040    }
   2041  }
   2042 
   2043  LAllocation* operator*() const {
   2044    if (WithSnapshotUses && snapshot_) {
   2045      return ins_.snapshot()->getEntry(idx_);
   2046    }
   2047    return ins_.getOperand(idx_);
   2048  }
   2049 
   2050  LAllocation* operator->() const { return **this; }
   2051 };
   2052 
   2053 // Iterator for instruction outputs or temps. Skips BogusTemp definitions.
   2054 template <bool Temps>
   2055 class LInstruction::DefIterImpl {
   2056 private:
   2057  LInstruction* ins_;
   2058  size_t idx_ = 0;
   2059  const size_t len_;
   2060 
   2061 public:
   2062  explicit DefIterImpl(LInstruction* ins)
   2063      : ins_(ins), len_(Temps ? ins->numTemps() : ins->numDefs()) {
   2064    settle();
   2065  }
   2066  bool done() const {
   2067    MOZ_ASSERT(idx_ <= len_);
   2068    return idx_ == len_;
   2069  }
   2070  void operator++(int) {
   2071    MOZ_ASSERT(!done());
   2072    idx_++;
   2073    settle();
   2074  }
   2075  void settle() {
   2076    while (!done() && get()->isBogusTemp()) {
   2077      idx_++;
   2078    }
   2079  }
   2080  size_t index() const {
   2081    MOZ_ASSERT(!done());
   2082    return idx_;
   2083  }
   2084  LDefinition* get() const {
   2085    MOZ_ASSERT(!done());
   2086    if constexpr (Temps) {
   2087      return ins_->getTemp(idx_);
   2088    }
   2089    return ins_->getDef(idx_);
   2090  }
   2091  LDefinition* operator*() const { return get(); }
   2092  LDefinition* operator->() const { return **this; }
   2093 };
   2094 
// Return true if a definition of this type must be recorded in safepoints,
// i.e. it may hold a GC-visible pointer (object, boxed Value or its pieces,
// wasm reference, or an interior slots/elements / wasm struct/array data
// pointer).
bool LDefinition::isSafepointGCType(LNode* ins) const {
  switch (type()) {
    case LDefinition::OBJECT:
    case LDefinition::SLOTS:
    case LDefinition::WASM_ANYREF:
    case LDefinition::WASM_STRUCT_DATA:
    case LDefinition::WASM_ARRAY_DATA:
#ifdef JS_NUNBOX32
    // On NUNBOX32 platforms a boxed Value is split into type/payload pieces.
    case LDefinition::TYPE:
    case LDefinition::PAYLOAD:
#else
    case LDefinition::BOX:
#endif
      return true;
    case LDefinition::STACKRESULTS: {
      // A stack-results area is GC-visible iff any result stored in it is a
      // wasm::AnyRef.
      LStackArea alloc(ins->toInstruction());
      for (auto iter = alloc.results(); iter; iter.next()) {
        if (iter.isWasmAnyRef()) {
          return true;
        }
      }
      return false;
    }
    case LDefinition::GENERAL:
    case LDefinition::INT32:
    case LDefinition::FLOAT32:
    case LDefinition::DOUBLE:
    case LDefinition::SIMD128:
      return false;
  }
  // All enum values are handled above; reaching here means memory corruption
  // or an unhandled new type.
  MOZ_CRASH("invalid type");
}
   2127 
   2128 class LIRGraph {
   2129  struct ValueHasher {
   2130    using Lookup = Value;
   2131    static HashNumber hash(const Value& v) { return HashNumber(v.asRawBits()); }
   2132    static bool match(const Value& lhs, const Value& rhs) { return lhs == rhs; }
   2133  };
   2134 
   2135  FixedList<LBlock> blocks_;
   2136 
   2137  // constantPool_ is a mozilla::Vector, not a js::Vector, because
   2138  // js::Vector<Value> is prohibited as unsafe. This particular Vector of
   2139  // Values is safe because it is only used within the scope of an
   2140  // AutoSuppressGC (in IonCompile), which inhibits GC.
   2141  mozilla::Vector<Value, 0, JitAllocPolicy> constantPool_;
   2142  using ConstantPoolMap = HashMap<Value, uint32_t, ValueHasher, JitAllocPolicy>;
   2143  ConstantPoolMap constantPoolMap_;
   2144  uint32_t numVirtualRegisters_;
   2145  uint32_t numInstructions_;
   2146 
   2147  // Number of instructions with a safepoint.
   2148  uint32_t numSafepoints_ = 0;
   2149  // Number of non-call instructions with a safepoint.
   2150  uint32_t numNonCallSafepoints_ = 0;
   2151 
   2152  // Number of call-instructions in this LIR graph.
   2153  uint32_t numCallInstructions_ = 0;
   2154 
   2155  // Size of stack slots needed for local spills.
   2156  uint32_t localSlotsSize_;
   2157  // Number of JS::Value stack slots needed for argument construction for calls.
   2158  uint32_t argumentSlotCount_;
   2159  // Count the number of extra times a single safepoint would be encoded.
   2160  uint32_t extraSafepointUses_;
   2161 
   2162  MIRGraph& mir_;
   2163 
   2164 public:
   2165  explicit LIRGraph(MIRGraph* mir);
   2166 
   2167  [[nodiscard]] bool init() {
   2168    return blocks_.init(mir_.alloc(), mir_.numBlocks());
   2169  }
   2170  MIRGraph& mir() const { return mir_; }
   2171  size_t numBlocks() const { return blocks_.length(); }
   2172  LBlock* getBlock(size_t i) { return &blocks_[i]; }
   2173  uint32_t numBlockIds() const { return mir_.numBlockIds(); }
   2174  [[nodiscard]] bool initBlock(MBasicBlock* mir) {
   2175    auto* block = &blocks_[mir->id()];
   2176    auto* lir = new (block) LBlock(mir);
   2177    return lir->init(mir_.alloc());
   2178  }
   2179  uint32_t getVirtualRegister() {
   2180    numVirtualRegisters_ += VREG_INCREMENT;
   2181    return numVirtualRegisters_;
   2182  }
   2183  uint32_t numVirtualRegisters() const {
   2184    // Virtual registers are 1-based, not 0-based, so add one as a
   2185    // convenience for 0-based arrays.
   2186    return numVirtualRegisters_ + 1;
   2187  }
   2188  uint32_t getInstructionId() { return numInstructions_++; }
   2189  uint32_t numInstructions() const { return numInstructions_; }
   2190 
   2191  void incNumCallInstructions() { numCallInstructions_++; }
   2192  uint32_t numCallInstructions() const { return numCallInstructions_; }
   2193 
   2194  void setLocalSlotsSize(uint32_t localSlotsSize) {
   2195    localSlotsSize_ = localSlotsSize;
   2196  }
   2197  uint32_t localSlotsSize() const { return localSlotsSize_; }
   2198  void setArgumentSlotCount(uint32_t argumentSlotCount) {
   2199    argumentSlotCount_ = argumentSlotCount;
   2200  }
   2201  uint32_t argumentSlotCount() const { return argumentSlotCount_; }
   2202  void addExtraSafepointUses(uint32_t extra) { extraSafepointUses_ += extra; }
   2203  uint32_t extraSafepointUses() const { return extraSafepointUses_; }
   2204  [[nodiscard]] bool addConstantToPool(const Value& v, uint32_t* index);
   2205  size_t numConstants() const { return constantPool_.length(); }
   2206  Value* constantPool() { return &constantPool_[0]; }
   2207 
   2208  void noteNeedsSafepoint(LInstruction* ins) {
   2209    numSafepoints_++;
   2210    if (!ins->isCall()) {
   2211      numNonCallSafepoints_++;
   2212    }
   2213  }
   2214 
   2215  size_t numNonCallSafepoints() const { return numNonCallSafepoints_; }
   2216  size_t numSafepoints() const { return numSafepoints_; }
   2217 
   2218 #ifdef JS_JITSPEW
   2219  void dump(GenericPrinter& out);
   2220  void dump();
   2221 #endif
   2222 };
   2223 
   2224 LAllocation::LAllocation(AnyRegister reg) {
   2225  if (reg.isFloat()) {
   2226    *this = LFloatReg(reg.fpu());
   2227  } else {
   2228    *this = LGeneralReg(reg.gpr());
   2229  }
   2230 }
   2231 
   2232 AnyRegister LAllocation::toAnyRegister() const {
   2233  MOZ_ASSERT(isAnyRegister());
   2234  if (isFloatReg()) {
   2235    return AnyRegister(toFloatReg()->reg());
   2236  }
   2237  return AnyRegister(toGeneralReg()->reg());
   2238 }
   2239 
   2240 }  // namespace jit
   2241 }  // namespace js
   2242 
   2243 #include "jit/shared/LIR-shared.h"
   2244 #if defined(JS_CODEGEN_X86)
   2245 #  include "jit/x86/LIR-x86.h"
   2246 #elif defined(JS_CODEGEN_X64)
   2247 #  include "jit/x64/LIR-x64.h"
   2248 #elif defined(JS_CODEGEN_ARM)
   2249 #  include "jit/arm/LIR-arm.h"
   2250 #elif defined(JS_CODEGEN_ARM64)
   2251 #  include "jit/arm64/LIR-arm64.h"
   2252 #elif defined(JS_CODEGEN_LOONG64)
   2253 #  include "jit/loong64/LIR-loong64.h"
   2254 #elif defined(JS_CODEGEN_RISCV64)
   2255 #  include "jit/riscv64/LIR-riscv64.h"
   2256 #elif defined(JS_CODEGEN_MIPS64)
   2257 #  include "jit/mips-shared/LIR-mips-shared.h"
   2258 #  include "jit/mips64/LIR-mips64.h"
   2259 #elif defined(JS_CODEGEN_WASM32)
   2260 #  include "jit/wasm32/LIR-wasm32.h"
   2261 #elif defined(JS_CODEGEN_NONE)
   2262 #  include "jit/none/LIR-none.h"
   2263 #else
   2264 #  error "Unknown architecture!"
   2265 #endif
   2266 
   2267 #undef LIR_HEADER
   2268 
   2269 namespace js {
   2270 namespace jit {
   2271 
// Define the checked downcasts LNode::toFoo() (mutable and const) for every
// LIR opcode: each asserts isFoo() before static_cast'ing.
#define LIROP(name)                           \
  L##name* LNode::to##name() {                \
    MOZ_ASSERT(is##name());                   \
    return static_cast<L##name*>(this);       \
  }                                           \
  const L##name* LNode::to##name() const {    \
    MOZ_ASSERT(is##name());                   \
    return static_cast<const L##name*>(this); \
  }
LIR_OPCODE_LIST(LIROP)
#undef LIROP
   2283 
// Define the checked downcasts LAllocation::toFoo() for the allocation
// kinds; each asserts isFoo() before static_cast'ing. LALLOC_CAST emits the
// mutable overload, LALLOC_CONST_CAST the const one.
#define LALLOC_CAST(type)               \
  L##type* LAllocation::to##type() {    \
    MOZ_ASSERT(is##type());             \
    return static_cast<L##type*>(this); \
  }
#define LALLOC_CONST_CAST(type)                  \
  const L##type* LAllocation::to##type() const { \
    MOZ_ASSERT(is##type());                      \
    return static_cast<const L##type*>(this);    \
  }

LALLOC_CAST(Use)
LALLOC_CONST_CAST(Use)
LALLOC_CONST_CAST(GeneralReg)
LALLOC_CONST_CAST(FloatReg)
LALLOC_CONST_CAST(StackSlot)
LALLOC_CAST(StackArea)
LALLOC_CONST_CAST(StackArea)
LALLOC_CONST_CAST(Argument)
LALLOC_CONST_CAST(ConstantIndex)

#undef LALLOC_CAST
   2306 
   2307 }  // namespace jit
   2308 }  // namespace js
   2309 
   2310 #endif /* jit_LIR_h */