tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

CacheIRCompiler.h (50224B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef jit_CacheIRCompiler_h
      8 #define jit_CacheIRCompiler_h
      9 
     10 #include "mozilla/Casting.h"
     11 #include "mozilla/Maybe.h"
     12 
     13 #include "jit/CacheIR.h"
     14 #include "jit/CacheIRReader.h"
     15 #include "jit/CacheIRWriter.h"
     16 #include "jit/JitOptions.h"
     17 #include "jit/MacroAssembler.h"
     18 #include "jit/PerfSpewer.h"
     19 #include "jit/SharedICRegisters.h"
     20 #include "js/ScalarType.h"  // js::Scalar::Type
     21 
     22 namespace JS {
     23 class BigInt;
     24 }
     25 
     26 namespace js {
     27 
     28 class TypedArrayObject;
     29 enum class UnaryMathFunction : uint8_t;
     30 
     31 namespace jit {
     32 
     33 class BaselineCacheIRCompiler;
     34 class ICCacheIRStub;
     35 class IonCacheIRCompiler;
     36 class IonScript;
     37 
     38 enum class ICStubEngine : uint8_t;
     39 
     40 // [SMDOC] CacheIR Value Representation and Tracking
     41 //
     42 // While compiling an IC stub the CacheIR compiler needs to keep track of the
     43 // physical location for each logical piece of data we care about, as well as
     44 // ensure that in the case of a stub failing, we are able to restore the input
     45 // state so that a subsequent stub can attempt to provide a value.
     46 //
     47 // OperandIds are created in the CacheIR front-end to keep track of values that
     48 // are passed between CacheIR ops during the execution of a given CacheIR stub.
     49 // In the CacheRegisterAllocator these OperandIds are given OperandLocations,
     50 // that represent the physical location of the OperandId at a given point in
     51 // time during CacheRegister allocation.
     52 //
     53 // In the CacheRegisterAllocator physical locations include the stack, and
     54 // registers, as well as whether or not the value has been unboxed.
     55 // Constants are also represented separately to provide for on-demand
     56 // materialization.
     57 //
     58 // Intra-op Register allocation:
     59 //
     60 // During the emission of a CacheIR op, code can ask the CacheRegisterAllocator
     61 // for access to a particular OperandId, and the register allocator will
     62 // generate the required code to fill that request.
     63 //
     64 // Input OperandIds should be considered as immutable, and should not be mutated
     65 // during the execution of a stub.
     66 //
     67 // There are also a number of RAII classes that interact with the register
     68 // allocator, in order to provide access to more registers than just those
     69 // provided for by the OperandIds.
     70 //
     71 // - AutoOutputReg: The register which will hold the output value of the stub.
     72 // - AutoScratchReg: By default, an arbitrary scratch register, however a
     73 //   specific register can be requested.
     74 // - AutoScratchRegMaybeOutput: Any arbitrary scratch register, but the output
     75 //   register may be used as well.
     76 //
     77 // These RAII classes take ownership of a register for the duration of their
     78 // lifetime so they can be used for computation or output. The register
     79 // allocator can spill values with OperandLocations in order to try to ensure
     80 // that a register is made available for use.
     81 //
     82 // If a specific register is required (via AutoScratchRegister), it should be
     83 // the first register acquired, as the register allocator will be unable to
     84 // allocate the fixed register if the current op is using it for something else.
     85 //
     86 // If no register can be provided after attempting to spill, a
     87 // MOZ_RELEASE_ASSERT ensures the browser will crash. The register allocator is
     88 // not provided enough information in its current design to insert spills and
     89 // fills at arbitrary locations, and so it can fail to find an allocation
     90 // solution. However, this will only happen within the implementation of an
     91 // operand emitter, and because the cache register allocator is mostly
     92 // deterministic, so long as the operand id emitter is tested, this won't
     93 // suddenly crop up in an arbitrary webpage. It's worth noting the most
     94 // difficult platform to support is x86-32, because it has the least number of
     95 // registers available.
     96 //
     97 // FailurePaths checkpoint the state of the register allocator so that the input
     98 // state can be recomputed from the current state before jumping to the next
     99 // stub in the IC chain. An important invariant is that the FailurePath must be
    100 // allocated for each op after all the manipulation of OperandLocations has
    101 // happened, so that its recording is correct.
    102 //
    103 // Inter-op Register Allocation:
    104 //
    105 // The RAII register management classes are RAII because all register state
    106 // outside the OperandLocations is reset before the compilation of each
    107 // individual CacheIR op. This means that you cannot rely on a value surviving
    108 // between ops, even if you use the ability of AutoScratchRegister to name a
    109 // specific register. Values that need to be preserved between ops must be given
    110 // an OperandId.
    111 
    112 // Represents a Value on the Baseline frame's expression stack. Slot 0 is the
    113 // value on top of the stack (the most recently pushed value), slot 1 is the
    114 // value pushed before that, etc.
    115 class BaselineFrameSlot {
    116  uint32_t slot_;
    117 
    118 public:
    119  explicit BaselineFrameSlot(uint32_t slot) : slot_(slot) {}
    120  uint32_t slot() const { return slot_; }
    121 
    122  bool operator==(const BaselineFrameSlot& other) const {
    123    return slot_ == other.slot_;
    124  }
    125  bool operator!=(const BaselineFrameSlot& other) const {
    126    return slot_ != other.slot_;
    127  }
    128 };
    129 
// OperandLocation represents the location of an OperandId. The operand is
// either in a register or on the stack, and is either boxed or unboxed.
class OperandLocation {
 public:
  enum Kind {
    Uninitialized = 0,
    PayloadReg,     // Unboxed payload in a GPR, plus its JSValueType.
    DoubleReg,      // Unboxed double in a float register.
    ValueReg,       // Boxed Value in a ValueOperand.
    PayloadStack,   // Unboxed payload pushed on the native stack.
    ValueStack,     // Boxed Value pushed on the native stack.
    BaselineFrame,  // Value stored in a Baseline frame expression-stack slot.
    Constant,       // Known constant Value; materialized on demand.
  };

 private:
  Kind kind_;

  // Tagged union; kind_ determines which member is active.
  union Data {
    struct {
      Register reg;
      JSValueType type;
    } payloadReg;
    FloatRegister doubleReg;
    ValueOperand valueReg;
    struct {
      // Stack depth (in bytes, see CacheRegisterAllocator::stackPushed_)
      // recorded when the payload was pushed.
      uint32_t stackPushed;
      JSValueType type;
    } payloadStack;
    // Stack depth recorded when the boxed Value was pushed.
    uint32_t valueStackPushed;
    BaselineFrameSlot baselineFrameSlot;
    Value constant;

    Data() : valueStackPushed(0) {}
  };
  Data data_;

 public:
  OperandLocation() : kind_(Uninitialized) {}

  Kind kind() const { return kind_; }

  void setUninitialized() { kind_ = Uninitialized; }

  // Accessors: each asserts that kind_ matches the union member being read.
  ValueOperand valueReg() const {
    MOZ_ASSERT(kind_ == ValueReg);
    return data_.valueReg;
  }
  Register payloadReg() const {
    MOZ_ASSERT(kind_ == PayloadReg);
    return data_.payloadReg.reg;
  }
  FloatRegister doubleReg() const {
    MOZ_ASSERT(kind_ == DoubleReg);
    return data_.doubleReg;
  }
  uint32_t payloadStack() const {
    MOZ_ASSERT(kind_ == PayloadStack);
    return data_.payloadStack.stackPushed;
  }
  uint32_t valueStack() const {
    MOZ_ASSERT(kind_ == ValueStack);
    return data_.valueStackPushed;
  }
  // The unboxed payload's type; valid for both register- and stack-held
  // payloads.
  JSValueType payloadType() const {
    if (kind_ == PayloadReg) {
      return data_.payloadReg.type;
    }
    MOZ_ASSERT(kind_ == PayloadStack);
    return data_.payloadStack.type;
  }
  Value constant() const {
    MOZ_ASSERT(kind_ == Constant);
    return data_.constant;
  }
  BaselineFrameSlot baselineFrameSlot() const {
    MOZ_ASSERT(kind_ == BaselineFrame);
    return data_.baselineFrameSlot;
  }

  // Setters: each switches kind_ and initializes the matching union member.
  void setPayloadReg(Register reg, JSValueType type) {
    kind_ = PayloadReg;
    data_.payloadReg.reg = reg;
    data_.payloadReg.type = type;
  }
  void setDoubleReg(FloatRegister reg) {
    kind_ = DoubleReg;
    data_.doubleReg = reg;
  }
  void setValueReg(ValueOperand reg) {
    kind_ = ValueReg;
    data_.valueReg = reg;
  }
  void setPayloadStack(uint32_t stackPushed, JSValueType type) {
    kind_ = PayloadStack;
    data_.payloadStack.stackPushed = stackPushed;
    data_.payloadStack.type = type;
  }
  void setValueStack(uint32_t stackPushed) {
    kind_ = ValueStack;
    data_.valueStackPushed = stackPushed;
  }
  void setConstant(const Value& v) {
    kind_ = Constant;
    data_.constant = v;
  }
  void setBaselineFrame(BaselineFrameSlot slot) {
    kind_ = BaselineFrame;
    data_.baselineFrameSlot = slot;
  }

  bool isUninitialized() const { return kind_ == Uninitialized; }
  bool isInRegister() const { return kind_ == PayloadReg || kind_ == ValueReg; }
  bool isOnStack() const {
    return kind_ == PayloadStack || kind_ == ValueStack;
  }

  // Recorded stack depth; only valid for PayloadStack/ValueStack locations.
  size_t stackPushed() const {
    if (kind_ == PayloadStack) {
      return data_.payloadStack.stackPushed;
    }
    MOZ_ASSERT(kind_ == ValueStack);
    return data_.valueStackPushed;
  }
  // Size of the stack slot: one word for a payload, sizeof(Value) for a
  // boxed Value.
  size_t stackSizeInBytes() const {
    if (kind_ == PayloadStack) {
      return sizeof(uintptr_t);
    }
    MOZ_ASSERT(kind_ == ValueStack);
    return sizeof(js::Value);
  }
  // Adjusts the recorded stack depth by |diff| bytes, so the slot can still
  // be addressed after the native stack grows or shrinks around it.
  void adjustStackPushed(int32_t diff) {
    if (kind_ == PayloadStack) {
      data_.payloadStack.stackPushed += diff;
      return;
    }
    MOZ_ASSERT(kind_ == ValueStack);
    data_.valueStackPushed += diff;
  }

  // True if this is a register location that uses |reg|.
  bool aliasesReg(Register reg) const {
    if (kind_ == PayloadReg) {
      return payloadReg() == reg;
    }
    if (kind_ == ValueReg) {
      return valueReg().aliases(reg);
    }
    return false;
  }
  bool aliasesReg(ValueOperand reg) const {
#if defined(JS_NUNBOX32)
    // On NUNBOX32 platforms a boxed Value occupies two GPRs; check both.
    return aliasesReg(reg.typeReg()) || aliasesReg(reg.payloadReg());
#else
    return aliasesReg(reg.valueReg());
#endif
  }

  bool aliasesReg(const OperandLocation& other) const;

  bool operator==(const OperandLocation& other) const;
  bool operator!=(const OperandLocation& other) const {
    return !operator==(other);
  }
};
    294 
    295 struct SpilledRegister {
    296  Register reg;
    297  uint32_t stackPushed;
    298 
    299  SpilledRegister(Register reg, uint32_t stackPushed)
    300      : reg(reg), stackPushed(stackPushed) {}
    301  bool operator==(const SpilledRegister& other) const {
    302    return reg == other.reg && stackPushed == other.stackPushed;
    303  }
    304  bool operator!=(const SpilledRegister& other) const {
    305    return !(*this == other);
    306  }
    307 };
    308 
    309 using SpilledRegisterVector = Vector<SpilledRegister, 2, SystemAllocPolicy>;
    310 
// Class to track and allocate registers while emitting IC code.
class MOZ_RAII CacheRegisterAllocator {
  // The original location of the inputs to the cache.
  Vector<OperandLocation, 4, SystemAllocPolicy> origInputLocations_;

  // The current location of each operand.
  Vector<OperandLocation, 8, SystemAllocPolicy> operandLocations_;

  // Free lists for value- and payload-slots on stack
  Vector<uint32_t, 2, SystemAllocPolicy> freeValueSlots_;
  Vector<uint32_t, 2, SystemAllocPolicy> freePayloadSlots_;

  // The registers allocated while emitting the current CacheIR op.
  // This prevents us from allocating a register and then immediately
  // clobbering it for something else, while we're still holding on to it.
  LiveGeneralRegisterSet currentOpRegs_;

  const AllocatableGeneralRegisterSet allocatableRegs_;

  // Registers that are currently unused and available.
  AllocatableGeneralRegisterSet availableRegs_;

  // Registers that are available, but before use they must be saved and
  // then restored when returning from the stub.
  AllocatableGeneralRegisterSet availableRegsAfterSpill_;

  // Registers we took from availableRegsAfterSpill_ and spilled to the stack.
  SpilledRegisterVector spilledRegs_;

  // The number of bytes pushed on the native stack.
  uint32_t stackPushed_;

#ifdef DEBUG
  // Flag used to assert individual CacheIR instructions don't allocate
  // registers after calling addFailurePath.
  bool addedFailurePath_;
#endif

  // The index of the CacheIR instruction we're currently emitting.
  uint32_t currentInstruction_;

  // Whether the stack contains a double spilled by AutoScratchFloatRegister.
  bool hasAutoScratchFloatRegisterSpill_ = false;

  const CacheIRWriter& writer_;

  // Not copyable: this object owns mutable codegen state tied to one
  // compilation.
  CacheRegisterAllocator(const CacheRegisterAllocator&) = delete;
  CacheRegisterAllocator& operator=(const CacheRegisterAllocator&) = delete;

  void freeDeadOperandLocations(MacroAssembler& masm);

  void spillOperandToStack(MacroAssembler& masm, OperandLocation* loc);
  void spillOperandToStackOrRegister(MacroAssembler& masm,
                                     OperandLocation* loc);

  void popPayload(MacroAssembler& masm, OperandLocation* loc, Register dest);
  void popValue(MacroAssembler& masm, OperandLocation* loc, ValueOperand dest);
  Address payloadAddress(MacroAssembler& masm,
                         const OperandLocation* loc) const;
  Address valueAddress(MacroAssembler& masm, const OperandLocation* loc) const;

#ifdef DEBUG
  void assertValidState() const;
#endif

 public:
  friend class AutoScratchRegister;
  friend class AutoScratchRegisterExcluding;

  // All GPRs are considered allocatable; the set of *available* registers
  // is provided later through initAvailableRegs().
  explicit CacheRegisterAllocator(const CacheIRWriter& writer)
      : allocatableRegs_(GeneralRegisterSet::All()),
        stackPushed_(0),
#ifdef DEBUG
        addedFailurePath_(false),
#endif
        currentInstruction_(0),
        writer_(writer) {
  }

  // Fallible second-phase initialization; callers must check the result.
  [[nodiscard]] bool init();

  void initAvailableRegs(const AllocatableGeneralRegisterSet& available) {
    availableRegs_ = available;
  }
  void initAvailableRegsAfterSpill();

  void fixupAliasedInputs(MacroAssembler& masm);

  // Current location of the i-th operand.
  OperandLocation operandLocation(size_t i) const {
    return operandLocations_[i];
  }
  void setOperandLocation(size_t i, const OperandLocation& loc) {
    operandLocations_[i] = loc;
  }

  // Location the i-th IC input had on entry to the stub.
  OperandLocation origInputLocation(size_t i) const {
    return origInputLocations_[i];
  }
  // The initInputLocation overloads record both the original and the current
  // location of the i-th input, for each supported location kind.
  void initInputLocation(size_t i, ValueOperand reg) {
    origInputLocations_[i].setValueReg(reg);
    operandLocations_[i].setValueReg(reg);
  }
  void initInputLocation(size_t i, Register reg, JSValueType type) {
    origInputLocations_[i].setPayloadReg(reg, type);
    operandLocations_[i].setPayloadReg(reg, type);
  }
  void initInputLocation(size_t i, FloatRegister reg) {
    origInputLocations_[i].setDoubleReg(reg);
    operandLocations_[i].setDoubleReg(reg);
  }
  void initInputLocation(size_t i, const Value& v) {
    origInputLocations_[i].setConstant(v);
    operandLocations_[i].setConstant(v);
  }
  void initInputLocation(size_t i, BaselineFrameSlot slot) {
    origInputLocations_[i].setBaselineFrame(slot);
    operandLocations_[i].setBaselineFrame(slot);
  }

  void initInputLocation(size_t i, const TypedOrValueRegister& reg);
  void initInputLocation(size_t i, const ConstantOrRegister& value);

  const SpilledRegisterVector& spilledRegs() const { return spilledRegs_; }

  [[nodiscard]] bool setSpilledRegs(const SpilledRegisterVector& regs) {
    spilledRegs_.clear();
    return spilledRegs_.appendAll(regs);
  }

  bool hasAutoScratchFloatRegisterSpill() const {
    return hasAutoScratchFloatRegisterSpill_;
  }
  void setHasAutoScratchFloatRegisterSpill(bool b) {
    // The flag must toggle; setting it to its current value indicates
    // unbalanced spill/restore bookkeeping.
    MOZ_ASSERT(hasAutoScratchFloatRegisterSpill_ != b);
    hasAutoScratchFloatRegisterSpill_ = b;
  }

  // Called between CacheIR ops: resets the per-op register state and
  // advances to the next instruction.
  void nextOp() {
#ifdef DEBUG
    assertValidState();
    addedFailurePath_ = false;
#endif
    currentOpRegs_.clear();
    currentInstruction_++;
  }

#ifdef DEBUG
  void setAddedFailurePath() {
    MOZ_ASSERT(!addedFailurePath_, "multiple failure paths for instruction");
    addedFailurePath_ = true;
  }
#endif

  // True if |opId| is not used by any instruction after the current one
  // (per the CacheIRWriter's liveness information).
  bool isDeadAfterInstruction(OperandId opId) const {
    return writer_.operandIsDead(opId.id(), currentInstruction_ + 1);
  }

  uint32_t stackPushed() const { return stackPushed_; }
  void setStackPushed(uint32_t pushed) { stackPushed_ = pushed; }

  bool isAllocatable(Register reg) const { return allocatableRegs_.has(reg); }

  // Allocates a new register.
  Register allocateRegister(MacroAssembler& masm);
  ValueOperand allocateValueRegister(MacroAssembler& masm);

  // Like the above, but for a specific register the caller requires.
  void allocateFixedRegister(MacroAssembler& masm, Register reg);
  void allocateFixedValueRegister(MacroAssembler& masm, ValueOperand reg);

  // Releases a register so it can be reused later.
  void releaseRegister(Register reg) {
    MOZ_ASSERT(currentOpRegs_.has(reg));
    availableRegs_.add(reg);
    currentOpRegs_.take(reg);
  }
  void releaseValueRegister(ValueOperand reg) {
#ifdef JS_NUNBOX32
    // A boxed Value occupies two GPRs on NUNBOX32 platforms.
    releaseRegister(reg.payloadReg());
    releaseRegister(reg.typeReg());
#else
    releaseRegister(reg.valueReg());
#endif
  }

  // Removes spilled values from the native stack. This should only be
  // called after all registers have been allocated.
  void discardStack(MacroAssembler& masm);

  Address addressOf(MacroAssembler& masm, BaselineFrameSlot slot) const;
  BaseValueIndex addressOf(MacroAssembler& masm, Register argcReg,
                           BaselineFrameSlot slot) const;

  // Returns the register for the given operand. If the operand is currently
  // not in a register, it will load it into one.
  ValueOperand useValueRegister(MacroAssembler& masm, ValOperandId val);
  Register useRegister(MacroAssembler& masm, TypedOperandId typedId);

  ConstantOrRegister useConstantOrRegister(MacroAssembler& masm,
                                           ValOperandId val);

  // Allocates an output register for the given operand.
  Register defineRegister(MacroAssembler& masm, TypedOperandId typedId);
  ValueOperand defineValueRegister(MacroAssembler& masm, ValOperandId val);

  // Loads (potentially coercing) and unboxes a value into a float register
  // This is infallible, as there should have been a previous guard
  // to ensure the value is already a number.
  // Does not change the allocator's state.
  void ensureDoubleRegister(MacroAssembler& masm, NumberOperandId op,
                            FloatRegister dest) const;

  // Loads an unboxed value into a scratch register. This can be useful
  // especially on 32-bit x86 when there are not enough registers for
  // useRegister.
  // Does not change the allocator's state.
  void copyToScratchRegister(MacroAssembler& masm, TypedOperandId typedId,
                             Register dest) const;
  void copyToScratchValueRegister(MacroAssembler& masm, ValOperandId valId,
                                  ValueOperand dest) const;

  // Returns |val|'s JSValueType or JSVAL_TYPE_UNKNOWN.
  JSValueType knownType(ValOperandId val) const;

  // Emits code to restore registers and stack to the state at the start of
  // the stub.
  void restoreInputState(MacroAssembler& masm, bool discardStack = true);

  // Returns the set of registers storing the IC input operands.
  GeneralRegisterSet inputRegisterSet() const;

  void saveIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs,
                            Register scratch, IonScript* ionScript);
  void restoreIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs);
};
    545 
    546 // RAII class to allocate a scratch register and release it when we're done
    547 // with it.
    548 class MOZ_RAII AutoScratchRegister {
    549  CacheRegisterAllocator& alloc_;
    550  Register reg_;
    551 
    552  AutoScratchRegister(const AutoScratchRegister&) = delete;
    553  void operator=(const AutoScratchRegister&) = delete;
    554 
    555 public:
    556  AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm,
    557                      Register reg = InvalidReg)
    558      : alloc_(alloc) {
    559    if (reg != InvalidReg) {
    560      alloc.allocateFixedRegister(masm, reg);
    561      reg_ = reg;
    562    } else {
    563      reg_ = alloc.allocateRegister(masm);
    564    }
    565    MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
    566  }
    567  ~AutoScratchRegister() { alloc_.releaseRegister(reg_); }
    568 
    569  Register get() const { return reg_; }
    570  operator Register() const { return reg_; }
    571 };
    572 
// On x86, spectreBoundsCheck32 can emit better code if it has a scratch
// register and index masking is enabled.
class MOZ_RAII AutoSpectreBoundsScratchRegister {
  mozilla::Maybe<AutoScratchRegister> scratch_;
  // Stays InvalidReg unless a scratch register is actually allocated
  // (x86 with Spectre index masking enabled).
  Register reg_ = InvalidReg;

  AutoSpectreBoundsScratchRegister(const AutoSpectreBoundsScratchRegister&) =
      delete;
  void operator=(const AutoSpectreBoundsScratchRegister&) = delete;

 public:
  AutoSpectreBoundsScratchRegister(CacheRegisterAllocator& alloc,
                                   MacroAssembler& masm) {
#ifdef JS_CODEGEN_X86
    // Only allocate when the scratch register will actually be used.
    if (JitOptions.spectreIndexMasking) {
      scratch_.emplace(alloc, masm);
      reg_ = scratch_->get();
    }
#endif
  }

  // May return InvalidReg; callers must handle the no-scratch case.
  Register get() const { return reg_; }
  operator Register() const { return reg_; }
};
    597 
// Scratch Register64. Implemented with a single AutoScratchRegister on 64-bit
// platforms and two AutoScratchRegisters on 32-bit platforms.
class MOZ_RAII AutoScratchRegister64 {
  AutoScratchRegister reg1_;
#if JS_BITS_PER_WORD == 32
  // On 32-bit platforms a 64-bit value spans two GPRs, so a second scratch
  // register is needed.
  AutoScratchRegister reg2_;
#endif

 public:
  AutoScratchRegister64(const AutoScratchRegister64&) = delete;
  void operator=(const AutoScratchRegister64&) = delete;

#if JS_BITS_PER_WORD == 32
  AutoScratchRegister64(CacheRegisterAllocator& alloc, MacroAssembler& masm)
      : reg1_(alloc, masm), reg2_(alloc, masm) {}

  Register64 get() const { return Register64(reg1_, reg2_); }
#else
  AutoScratchRegister64(CacheRegisterAllocator& alloc, MacroAssembler& masm)
      : reg1_(alloc, masm) {}

  Register64 get() const { return Register64(reg1_); }
#endif

  operator Register64() const { return get(); }
};
    624 
// Scratch ValueOperand. Implemented with a single AutoScratchRegister on 64-bit
// platforms and two AutoScratchRegisters on 32-bit platforms.
class MOZ_RAII AutoScratchValueRegister {
  AutoScratchRegister reg1_;
#if JS_BITS_PER_WORD == 32
  // On 32-bit platforms a boxed Value occupies two GPRs, so a second scratch
  // register is needed.
  AutoScratchRegister reg2_;
#endif

 public:
  AutoScratchValueRegister(const AutoScratchValueRegister&) = delete;
  void operator=(const AutoScratchValueRegister&) = delete;

#if JS_BITS_PER_WORD == 32
  AutoScratchValueRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm)
      : reg1_(alloc, masm), reg2_(alloc, masm) {}

  ValueOperand get() const { return ValueOperand(reg1_, reg2_); }
#else
  AutoScratchValueRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm)
      : reg1_(alloc, masm) {}

  ValueOperand get() const { return ValueOperand(reg1_); }
#endif

  operator ValueOperand() const { return get(); }
};
    651 
// The FailurePath class stores everything we need to generate a failure path
// at the end of the IC code. The failure path restores the input registers, if
// needed, and jumps to the next stub.
class FailurePath {
  // Locations of the IC input operands at the point the failure path was
  // created.
  Vector<OperandLocation, 4, SystemAllocPolicy> inputs_;
  // Registers that were spilled to the stack at that point.
  SpilledRegisterVector spilledRegs_;
  NonAssertingLabel label_;
  // Native stack depth (in bytes) at that point.
  uint32_t stackPushed_;
#ifdef DEBUG
  // Flag to ensure FailurePath::label() isn't taken while there's a scratch
  // float register which still needs to be restored.
  bool hasAutoScratchFloatRegister_ = false;
#endif

 public:
  FailurePath() = default;

  FailurePath(FailurePath&& other)
      : inputs_(std::move(other.inputs_)),
        spilledRegs_(std::move(other.spilledRegs_)),
        label_(other.label_),
        stackPushed_(other.stackPushed_) {}

  // Prefer label(): it asserts no AutoScratchFloatRegister restore is still
  // pending. labelUnchecked() skips that assertion.
  Label* labelUnchecked() { return &label_; }
  Label* label() {
    MOZ_ASSERT(!hasAutoScratchFloatRegister_);
    return labelUnchecked();
  }

  void setStackPushed(uint32_t i) { stackPushed_ = i; }
  uint32_t stackPushed() const { return stackPushed_; }

  // Fallible append of one input location; callers must check the result.
  [[nodiscard]] bool appendInput(const OperandLocation& loc) {
    return inputs_.append(loc);
  }
  OperandLocation input(size_t i) const { return inputs_[i]; }

  const SpilledRegisterVector& spilledRegs() const { return spilledRegs_; }

  // May only be called once (the vector must still be empty).
  [[nodiscard]] bool setSpilledRegs(const SpilledRegisterVector& regs) {
    MOZ_ASSERT(spilledRegs_.empty());
    return spilledRegs_.appendAll(regs);
  }

  // If canShareFailurePath(other) returns true, the same machine code will
  // be emitted for two failure paths, so we can share them.
  bool canShareFailurePath(const FailurePath& other) const;

  // Bracket an AutoScratchFloatRegister's lifetime; in DEBUG builds these
  // maintain the flag checked by label(). No-ops in release builds.
  void setHasAutoScratchFloatRegister() {
#ifdef DEBUG
    MOZ_ASSERT(!hasAutoScratchFloatRegister_);
    hasAutoScratchFloatRegister_ = true;
#endif
  }

  void clearHasAutoScratchFloatRegister() {
#ifdef DEBUG
    MOZ_ASSERT(hasAutoScratchFloatRegister_);
    hasAutoScratchFloatRegister_ = false;
#endif
  }
};
    714 
    715 /**
    716 * Wrap an offset so that a call can decide to embed a constant
    717 * or load from the stub data.
    718 */
    719 class StubFieldOffset {
    720 private:
    721  uint32_t offset_;
    722  StubField::Type type_;
    723 
    724 public:
    725  StubFieldOffset(uint32_t offset, StubField::Type type)
    726      : offset_(offset), type_(type) {}
    727 
    728  uint32_t getOffset() { return offset_; }
    729  StubField::Type getStubFieldType() { return type_; }
    730 };
    731 
    732 class AutoOutputRegister;
    733 
    734 // Base class for BaselineCacheIRCompiler and IonCacheIRCompiler.
    735 class MOZ_RAII CacheIRCompiler {
    736 protected:
    737  friend class AutoOutputRegister;
    738  friend class AutoStubFrame;
    739  friend class AutoSaveLiveRegisters;
    740  friend class AutoCallVM;
    741  friend class AutoScratchFloatRegister;
    742  friend class AutoAvailableFloatRegister;
    743 
    744  enum class Mode { Baseline, Ion };
    745 
    746  bool enteredStubFrame_;
    747 
    748  bool isBaseline();
    749  bool isIon();
    750  BaselineCacheIRCompiler* asBaseline();
    751  IonCacheIRCompiler* asIon();
    752 
    753  JSContext* cx_;
    754  const CacheIRWriter& writer_;
    755  StackMacroAssembler masm;
    756 
    757  CacheRegisterAllocator allocator;
    758  Vector<FailurePath, 4, SystemAllocPolicy> failurePaths;
    759 
    760  // Float registers that are live. Registers not in this set can be
    761  // clobbered and don't need to be saved before performing a VM call.
    762  // Doing this for non-float registers is a bit more complicated because
    763  // the IC register allocator allocates GPRs.
    764  LiveFloatRegisterSet liveFloatRegs_;
    765 
    766  mozilla::Maybe<TypedOrValueRegister> outputUnchecked_;
    767  Mode mode_;
    768 
    769  // Distance from the IC to the stub data; mostly will be
    770  // sizeof(stubType)
    771  uint32_t stubDataOffset_;
    772 
    773  enum class StubFieldPolicy { Address, Constant };
    774 
    775  StubFieldPolicy stubFieldPolicy_;
    776 
    777  CacheIRCompiler(JSContext* cx, TempAllocator& alloc,
    778                  const CacheIRWriter& writer, uint32_t stubDataOffset,
    779                  Mode mode, StubFieldPolicy policy)
    780      : enteredStubFrame_(false),
    781        cx_(cx),
    782        writer_(writer),
    783        masm(cx, alloc),
    784        allocator(writer_),
    785        liveFloatRegs_(FloatRegisterSet::All()),
    786        mode_(mode),
    787        stubDataOffset_(stubDataOffset),
    788        stubFieldPolicy_(policy) {
    789    MOZ_ASSERT(!writer.failed());
    790  }
    791 
  // Registers a failure path for the current instruction and returns it in
  // |*failure|; false presumably signals OOM — see CacheIRCompiler.cpp.
  [[nodiscard]] bool addFailurePath(FailurePath** failure);
  // Emits the code for failurePaths[i].
  [[nodiscard]] bool emitFailurePath(size_t i);

  // Returns the set of volatile float registers that are live. These
  // registers need to be saved when making non-GC calls with callWithABI.
  FloatRegisterSet liveVolatileFloatRegs() const {
    return FloatRegisterSet::Intersect(liveFloatRegs_.set(),
                                       FloatRegisterSet::Volatile());
  }

  // Returns the set of volatile registers that are live. These registers need
  // to be saved when making non-GC calls with callWithABI.
  LiveRegisterSet liveVolatileRegs() const {
    // All volatile GPR registers are treated as live.
    return {GeneralRegisterSet::Volatile(), liveVolatileFloatRegs()};
  }
    808 
    809  bool objectGuardNeedsSpectreMitigations(ObjOperandId objId) const {
    810    // Instructions like GuardShape need Spectre mitigations if
    811    // (1) mitigations are enabled and (2) the object is used by other
    812    // instructions (if the object is *not* used by other instructions,
    813    // zeroing its register is pointless).
    814    return JitOptions.spectreObjectMitigations &&
    815           !allocator.isDeadAfterInstruction(objId);
    816  }
    817 
 private:
  // Shared tail of the nursery post-write-barrier emitters below; defined in
  // CacheIRCompiler.cpp. |maybeIndex| is InvalidReg for slot barriers.
  void emitPostBarrierShared(Register obj, const ConstantOrRegister& val,
                             Register scratch, Register maybeIndex);

  // Convenience overload for a boxed Value held in registers.
  void emitPostBarrierShared(Register obj, ValueOperand val, Register scratch,
                             Register maybeIndex) {
    emitPostBarrierShared(obj, ConstantOrRegister(val), scratch, maybeIndex);
  }

 protected:
  // Post-barrier for a write to a slot: no element index is involved.
  template <typename T>
  void emitPostBarrierSlot(Register obj, const T& val, Register scratch) {
    emitPostBarrierShared(obj, val, scratch, InvalidReg);
  }

  // Post-barrier for a write to an element; |index| must be a real register.
  template <typename T>
  void emitPostBarrierElement(Register obj, const T& val, Register scratch,
                              Register index) {
    MOZ_ASSERT(index != InvalidReg);
    emitPostBarrierShared(obj, val, scratch, index);
  }
    839 
  // Shared lowering for pointer-identity comparisons (|op| and two typed
  // operands of the same kind).
  bool emitComparePointerResultShared(JSOp op, TypedOperandId lhsId,
                                      TypedOperandId rhsId);

  [[nodiscard]] bool emitMathFunctionNumberResultShared(
      UnaryMathFunction fun, FloatRegister inputScratch, ValueOperand output);

  // Shared lowering for BigInt operations; |fn| is the VM function
  // implementing the actual operation.
  template <typename Fn, Fn fn>
  [[nodiscard]] bool emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
                                                     BigIntOperandId rhsId);

  template <typename Fn, Fn fn>
  [[nodiscard]] bool emitBigIntUnaryOperationShared(BigIntOperandId inputId);

  bool emitDoubleIncDecResult(bool isInc, NumberOperandId inputId);

  // Branches to |fail| when |index| is out of bounds for the typed array in
  // |obj|. Which scratch registers are actually needed depends on |viewKind|;
  // the Maybe<Register> overload makes the optional ones explicit.
  void emitTypedArrayBoundsCheck(ArrayBufferViewKind viewKind, Register obj,
                                 Register index, Register scratch,
                                 Register maybeScratch, Register spectreScratch,
                                 Label* fail);

  void emitTypedArrayBoundsCheck(ArrayBufferViewKind viewKind, Register obj,
                                 Register index, Register scratch,
                                 mozilla::Maybe<Register> maybeScratch,
                                 mozilla::Maybe<Register> spectreScratch,
                                 Label* fail);

  // Bounds check for a DataView access of |byteSize| bytes at |offset|.
  void emitDataViewBoundsCheck(ArrayBufferViewKind viewKind, size_t byteSize,
                               Register obj, Register offset, Register scratch,
                               Register maybeScratch, Label* fail);

  // VM callback signature for 32-bit atomic read-modify-write operations.
  using AtomicsReadWriteModifyFn = int32_t (*)(TypedArrayObject*, size_t,
                                               int32_t);

  [[nodiscard]] bool emitAtomicsReadModifyWriteResult(
      ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
      Scalar::Type elementType, ArrayBufferViewKind viewKind,
      AtomicsReadWriteModifyFn fn);

  // VM callback signature for 64-bit (BigInt) atomic read-modify-write
  // operations; may GC, hence the JSContext*.
  using AtomicsReadWriteModify64Fn = JS::BigInt* (*)(JSContext*,
                                                     TypedArrayObject*, size_t,
                                                     const JS::BigInt*);

  template <AtomicsReadWriteModify64Fn fn>
  [[nodiscard]] bool emitAtomicsReadModifyWriteResult64(
      ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
      ArrayBufferViewKind viewKind);

  void emitActivateIterator(Register objBeingIterated, Register iterObject,
                            Register nativeIter, Register scratch,
                            Register scratch2, uint32_t enumeratorsAddrOffset);

  // Declarations for the generated emitters shared between Baseline and Ion.
  CACHE_IR_COMPILER_SHARED_GENERATED

  // Load a word-sized stub field into |dest|; the Constant variant requires
  // the field value to be known at compile time.
  void emitLoadStubField(StubFieldOffset val, Register dest);
  void emitLoadStubFieldConstant(StubFieldOffset val, Register dest);

  void emitLoadValueStubField(StubFieldOffset val, ValueOperand dest);
  void emitLoadDoubleValueStubField(StubFieldOffset val, ValueOperand dest,
                                    FloatRegister scratch);
    899 
  // Compile-time readers for stub fields. These require
  // StubFieldPolicy::Constant: the field value is read from the writer now,
  // not loaded from stub data at runtime.
  uintptr_t readStubWord(uint32_t offset, StubField::Type type) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    // Word-sized fields live at word-aligned offsets in the stub data.
    MOZ_ASSERT((offset % sizeof(uintptr_t)) == 0);
    return writer_.readStubField(offset, type).asWord();
  }
  uint64_t readStubInt64(uint32_t offset, StubField::Type type) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    MOZ_ASSERT((offset % sizeof(uintptr_t)) == 0);
    return writer_.readStubField(offset, type).asInt64();
  }
  // Both int32 readers read a RawInt32 field; only the signedness of the
  // returned value differs.
  int32_t int32StubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return readStubWord(offset, StubField::Type::RawInt32);
  }
  uint32_t uint32StubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return readStubWord(offset, StubField::Type::RawInt32);
  }
    918  Shape* shapeStubField(uint32_t offset) {
    919    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    920    return (Shape*)readStubWord(offset, StubField::Type::Shape);
    921  }
    922  Shape* weakShapeStubField(uint32_t offset) {
    923    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    924    Shape* shape = (Shape*)readStubWord(offset, StubField::Type::WeakShape);
    925    gc::ReadBarrier(shape);
    926    return shape;
    927  }
    928  JSObject* objectStubField(uint32_t offset) {
    929    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    930    return (JSObject*)readStubWord(offset, StubField::Type::JSObject);
    931  }
  // Reads the WeakObject stub field at |offset|; the read barrier is needed
  // because the reference is weak.
  JSObject* weakObjectStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    JSObject* obj =
        (JSObject*)readStubWord(offset, StubField::Type::WeakObject);
    gc::ReadBarrier(obj);
    return obj;
  }
    939  Value valueStubField(uint32_t offset) {
    940    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    941    uint64_t raw = readStubInt64(offset, StubField::Type::Value);
    942    return Value::fromRawBits(raw);
    943  }
  // Reads the WeakValue stub field at |offset|; the value read barrier is
  // needed because the reference is weak.
  Value weakValueStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    uint64_t raw = readStubInt64(offset, StubField::Type::WeakValue);
    Value v = Value::fromRawBits(raw);
    gc::ValueReadBarrier(v);
    return v;
  }
  // Reads the Double stub field at |offset|, reinterpreting the stored
  // 64-bit pattern as an IEEE double (a bit copy, not a numeric conversion).
  double doubleStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    uint64_t raw = readStubInt64(offset, StubField::Type::Double);
    return mozilla::BitwiseCast<double>(raw);
  }
  JSString* stringStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (JSString*)readStubWord(offset, StubField::Type::String);
  }
  JS::Symbol* symbolStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (JS::Symbol*)readStubWord(offset, StubField::Type::Symbol);
  }
  // Compartments are stored as RawPointer fields, not as GC-thing fields.
  JS::Compartment* compartmentStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (JS::Compartment*)readStubWord(offset, StubField::Type::RawPointer);
  }
  // Weak reference: requires a read barrier, like the other weak*StubField
  // accessors above.
  BaseScript* weakBaseScriptStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    BaseScript* script =
        (BaseScript*)readStubWord(offset, StubField::Type::WeakBaseScript);
    gc::ReadBarrier(script);
    return script;
  }
  // NOTE(review): the next three accessors take |offset| as uintptr_t while
  // every other accessor uses uint32_t — looks like a plain inconsistency,
  // but confirm with callers before unifying the signatures.
  const JSClass* classStubField(uintptr_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (const JSClass*)readStubWord(offset, StubField::Type::RawPointer);
  }
  const void* proxyHandlerStubField(uintptr_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (const void*)readStubWord(offset, StubField::Type::RawPointer);
  }
  const void* pointerStubField(uintptr_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (const void*)readStubWord(offset, StubField::Type::RawPointer);
  }
  jsid idStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return jsid::fromRawBits(readStubWord(offset, StubField::Type::Id));
  }
    991 
#ifdef DEBUG
  // Asserts |reg| may be used as a scratch float register in the current
  // mode (see AutoAvailableFloatRegister).
  void assertFloatRegisterAvailable(FloatRegister reg);
#endif

  // Emits a call to the VM function identified by |id|; the Fn/fn template
  // wrapper resolves the VMFunctionId at compile time.
  void callVMInternal(MacroAssembler& masm, VMFunctionId id);
  template <typename Fn, Fn fn>
  void callVM(MacroAssembler& masm);
    999 };
   1000 
// Ensures the IC's output register is available for writing.
class MOZ_RAII AutoOutputRegister {
  TypedOrValueRegister output_;
  CacheRegisterAllocator& alloc_;

  AutoOutputRegister(const AutoOutputRegister&) = delete;
  void operator=(const AutoOutputRegister&) = delete;

 public:
  explicit AutoOutputRegister(CacheIRCompiler& compiler);
  ~AutoOutputRegister();

  // Returns a GPR that is part of the output: the scratch register of a
  // boxed Value output, or the GPR of a typed output. Returns InvalidReg
  // when the output is a float register (no GPR to reuse).
  Register maybeReg() const {
    if (output_.hasValue()) {
      return output_.valueReg().scratchReg();
    }
    if (!output_.typedReg().isFloat()) {
      return output_.typedReg().gpr();
    }
    return InvalidReg;
  }

  bool hasValue() const { return output_.hasValue(); }
  ValueOperand valueReg() const { return output_.valueReg(); }
  AnyRegister typedReg() const { return output_.typedReg(); }

  // Only valid for typed outputs; a boxed Value output has no single type.
  JSValueType type() const {
    MOZ_ASSERT(!hasValue());
    return ValueTypeFromMIRType(output_.type());
  }

  operator TypedOrValueRegister() const { return output_; }
};
   1034 
// Instructions that have to perform a callVM require a stub frame. Call its
// enter() and leave() methods to enter/leave the stub frame.
// Hoisted from jit/BaselineCacheIRCompiler.cpp. See there for method
// definitions.
class MOZ_RAII AutoStubFrame {
  BaselineCacheIRCompiler& compiler;
#ifdef DEBUG
  // Presumably masm.framePushed() at enter(), checked by the DEBUG
  // destructor — confirm in BaselineCacheIRCompiler.cpp.
  uint32_t framePushedAtEnterStubFrame_;
#endif

  AutoStubFrame(const AutoStubFrame&) = delete;
  void operator=(const AutoStubFrame&) = delete;

 public:
  explicit AutoStubFrame(BaselineCacheIRCompiler& compiler);

  void enter(MacroAssembler& masm, Register scratch);
  void leave(MacroAssembler& masm);
  void pushInlinedICScript(MacroAssembler& masm, Address icScriptAddr);
  // Spills |val| into a stub-frame slot; loadTracedValue reads a previously
  // stored value back by slot index.
  void storeTracedValue(MacroAssembler& masm, ValueOperand val);
  void loadTracedValue(MacroAssembler& masm, uint8_t slotIndex,
                       ValueOperand result);

#ifdef DEBUG
  // DEBUG-only destructor; in release builds the class is trivially
  // destructible.
  ~AutoStubFrame();
#endif
};
// AutoSaveLiveRegisters must be used when we make a call that can GC. The
// constructor ensures all live registers are stored on the stack (where the GC
// expects them) and the destructor restores these registers.
//
// Ion-only: Baseline uses AutoStubFrame instead (see AutoCallVM below).
class MOZ_RAII AutoSaveLiveRegisters {
  IonCacheIRCompiler& compiler_;

  AutoSaveLiveRegisters(const AutoSaveLiveRegisters&) = delete;
  void operator=(const AutoSaveLiveRegisters&) = delete;

 public:
  explicit AutoSaveLiveRegisters(IonCacheIRCompiler& compiler);

  ~AutoSaveLiveRegisters();
};
   1076 // Like AutoScratchRegister, but reuse a register of |output| if possible.
   1077 class MOZ_RAII AutoScratchRegisterMaybeOutput {
   1078  mozilla::Maybe<AutoScratchRegister> scratch_;
   1079  Register scratchReg_;
   1080 
   1081  AutoScratchRegisterMaybeOutput(const AutoScratchRegisterMaybeOutput&) =
   1082      delete;
   1083  void operator=(const AutoScratchRegisterMaybeOutput&) = delete;
   1084 
   1085 public:
   1086  AutoScratchRegisterMaybeOutput(CacheRegisterAllocator& alloc,
   1087                                 MacroAssembler& masm,
   1088                                 const AutoOutputRegister& output) {
   1089    scratchReg_ = output.maybeReg();
   1090    if (scratchReg_ == InvalidReg) {
   1091      scratch_.emplace(alloc, masm);
   1092      scratchReg_ = scratch_.ref();
   1093    }
   1094  }
   1095  AutoScratchRegisterMaybeOutput(CacheRegisterAllocator& alloc,
   1096                                 MacroAssembler& masm) {
   1097    scratch_.emplace(alloc, masm);
   1098    scratchReg_ = scratch_.ref();
   1099  }
   1100 
   1101  Register get() const { return scratchReg_; }
   1102  operator Register() const { return scratchReg_; }
   1103 };
   1104 
// Like AutoScratchRegisterMaybeOutput, but tries to use the ValueOperand's
// type register for the scratch register on 32-bit.
//
// Word of warning: Passing an instance of this class and AutoOutputRegister to
// functions may not work correctly, because no guarantee is given that the type
// register is used last when modifying the output's ValueOperand.
class MOZ_RAII AutoScratchRegisterMaybeOutputType {
  mozilla::Maybe<AutoScratchRegister> scratch_;
  Register scratchReg_;

 public:
  AutoScratchRegisterMaybeOutputType(CacheRegisterAllocator& alloc,
                                     MacroAssembler& masm,
                                     const AutoOutputRegister& output) {
#if defined(JS_NUNBOX32)
    // On NUNBOX32 a boxed output has a separate type register we can borrow.
    scratchReg_ = output.hasValue() ? output.valueReg().typeReg() : InvalidReg;
#else
    // Punned boxing (64-bit): no separate type register exists.
    scratchReg_ = InvalidReg;
#endif
    if (scratchReg_ == InvalidReg) {
      scratch_.emplace(alloc, masm);
      scratchReg_ = scratch_.ref();
    }
  }

  AutoScratchRegisterMaybeOutputType(
      const AutoScratchRegisterMaybeOutputType&) = delete;

  void operator=(const AutoScratchRegisterMaybeOutputType&) = delete;

  Register get() const { return scratchReg_; }
  operator Register() const { return scratchReg_; }
};
   1138 
// AutoCallVM is a wrapper class that unifies methods shared by
// IonCacheIRCompiler and BaselineCacheIRCompiler that perform a callVM, but
// require stub specific functionality before performing the VM call.
//
// Expected Usage:
//
//   OPs with implementations that may be unified by this class must:
//     - Be listed in the CACHEIR_OPS list but not in the CACHE_IR_SHARED_OPS
//     list
//     - Differ only in their use of `AutoSaveLiveRegisters`,
//       `AutoOutputRegister`, and `AutoScratchRegister`. The Ion
//       implementation will use `AutoSaveLiveRegisters` and
//       `AutoOutputRegister`, while the Baseline implementation will use
//       `AutoScratchRegister`.
//     - Both use the `callVM` method.
//
//   Using AutoCallVM:
//     - The constructor initializes `AutoOutputRegister` for both compiler
//       types. Additionally it initializes an `AutoSaveLiveRegisters` for
//       CacheIRCompilers with the mode Ion, and initializes
//       `AutoScratchRegisterMaybeOutput` and `AutoStubFrame` variables for
//       compilers with mode Baseline.
//     - The `prepare()` method calls the IonCacheIRCompiler method
//       `prepareVMCall` for IonCacheIRCompilers, calls the `enter()` method of
//       `AutoStubFrame` for BaselineCacheIRCompilers, and calls the
//       `discardStack` method of the `CacheRegisterAllocator` class for both
//       compiler types.
//     - The `call()` method invokes `callVM` on the CacheIRCompiler and stores
//       the call result according to its type. Finally it calls the `leave`
//       method of `AutoStubFrame` for BaselineCacheIRCompilers.
//
//   Expected Usage Example:
//     See: `CacheIRCompiler::emitCallGetSparseElementResult()`
//
// Restrictions:
//   - OPs that do not meet the criteria listed above can not be unified with
//     AutoCallVM
//

class MOZ_RAII AutoCallVM {
  MacroAssembler& masm_;
  CacheIRCompiler* compiler_;
  CacheRegisterAllocator& allocator_;
  mozilla::Maybe<AutoOutputRegister> output_;

  // Baseline specific stuff
  mozilla::Maybe<AutoStubFrame> stubFrame_;
  mozilla::Maybe<AutoScratchRegisterMaybeOutput> scratch_;

  // Ion specific stuff
  mozilla::Maybe<AutoSaveLiveRegisters> save_;

  // Moves the VM call's return value into the output register; the
  // template form derives the value type from Fn's return type.
  void storeResult(JSValueType returnType);

  template <typename Fn>
  void storeResult();

  // No-op for Ion; leaves the stub frame for Baseline.
  void leaveBaselineStubFrame();

 public:
  AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
             CacheRegisterAllocator& allocator);

  void prepare();

  // Performs the VM call and stores its result (see class comment).
  template <typename Fn, Fn fn>
  void call() {
    compiler_->callVM<Fn, fn>(masm_);
    storeResult<Fn>();
    leaveBaselineStubFrame();
  }

  // Like call(), but for VM functions whose result is discarded.
  template <typename Fn, Fn fn>
  void callNoResult() {
    compiler_->callVM<Fn, fn>(masm_);
    leaveBaselineStubFrame();
  }

  const AutoOutputRegister& output() const { return *output_; }
  ValueOperand outputValueReg() const { return output_->valueReg(); }
};
   1219 
// RAII class to allocate FloatReg0 as a scratch register and release it when
// we're done with it. The previous contents of FloatReg0 may be spilled on the
// stack and, if necessary, are restored when the destructor runs.
//
// When FailurePath is passed to the constructor, FailurePath::label() must not
// be used during the life time of the AutoScratchFloatRegister. Instead use
// AutoScratchFloatRegister::failure().
class MOZ_RAII AutoScratchFloatRegister {
  // Presumably a landing pad that restores the spilled FloatReg0 before
  // continuing to the real failure path — confirm in CacheIRCompiler.cpp.
  Label failurePopReg_{};
  CacheIRCompiler* compiler_;
  FailurePath* failure_;  // may be null (see single-argument constructor)

  AutoScratchFloatRegister(const AutoScratchFloatRegister&) = delete;
  void operator=(const AutoScratchFloatRegister&) = delete;

 public:
  explicit AutoScratchFloatRegister(CacheIRCompiler* compiler)
      : AutoScratchFloatRegister(compiler, nullptr) {}

  AutoScratchFloatRegister(CacheIRCompiler* compiler, FailurePath* failure);

  ~AutoScratchFloatRegister();

  // Use this instead of failure->label() while this scratch is alive.
  Label* failure();

  FloatRegister get() const { return FloatReg0; }
  operator FloatRegister() const { return FloatReg0; }
};
   1248 
// This class can be used to assert a certain FloatRegister is available. In
// Baseline mode, all float registers are available. In Ion mode, only the
// registers added as fixed temps in LIRGenerator are available.
class MOZ_RAII AutoAvailableFloatRegister {
  FloatRegister reg_;

  AutoAvailableFloatRegister(const AutoAvailableFloatRegister&) = delete;
  void operator=(const AutoAvailableFloatRegister&) = delete;

 public:
  explicit AutoAvailableFloatRegister(CacheIRCompiler& compiler,
                                      FloatRegister reg)
      : reg_(reg) {
#ifdef DEBUG
    // Debug-only check; in release builds this class is purely declarative.
    compiler.assertFloatRegisterAvailable(reg);
#endif
  }

  FloatRegister get() const { return reg_; }
  operator FloatRegister() const { return reg_; }
};
   1270 
// For GC thing fields, map from StubField::Type to the C++ types used.
//
// RawType is the unwrapped pointer/value type; WrappedType is the heap
// wrapper the stub data stores: GCPtr for strong edges, WeakHeapPtr for the
// Weak* field types.
template <StubField::Type type>
struct MapStubFieldToType {};
template <>
struct MapStubFieldToType<StubField::Type::Shape> {
  using RawType = Shape*;
  using WrappedType = GCPtr<Shape*>;
};
template <>
struct MapStubFieldToType<StubField::Type::WeakShape> {
  using RawType = Shape*;
  using WrappedType = WeakHeapPtr<Shape*>;
};
template <>
struct MapStubFieldToType<StubField::Type::JSObject> {
  using RawType = JSObject*;
  using WrappedType = GCPtr<JSObject*>;
};
template <>
struct MapStubFieldToType<StubField::Type::WeakObject> {
  using RawType = JSObject*;
  using WrappedType = WeakHeapPtr<JSObject*>;
};
template <>
struct MapStubFieldToType<StubField::Type::Symbol> {
  using RawType = JS::Symbol*;
  using WrappedType = GCPtr<JS::Symbol*>;
};
template <>
struct MapStubFieldToType<StubField::Type::String> {
  using RawType = JSString*;
  using WrappedType = GCPtr<JSString*>;
};
template <>
struct MapStubFieldToType<StubField::Type::WeakBaseScript> {
  using RawType = BaseScript*;
  using WrappedType = WeakHeapPtr<BaseScript*>;
};
template <>
struct MapStubFieldToType<StubField::Type::JitCode> {
  using RawType = JitCode*;
  using WrappedType = GCPtr<JitCode*>;
};
template <>
struct MapStubFieldToType<StubField::Type::Id> {
  using RawType = jsid;
  using WrappedType = GCPtr<jsid>;
};
template <>
struct MapStubFieldToType<StubField::Type::Value> {
  using RawType = Value;
  using WrappedType = GCPtr<Value>;
};
template <>
struct MapStubFieldToType<StubField::Type::WeakValue> {
  using RawType = Value;
  using WrappedType = WeakHeapPtr<Value>;
};
   1329 
// See the 'Sharing Baseline stub code' comment in CacheIR.h for a description
// of this class.
//
// CacheIRStubInfo has a trailing variable-length array of bytes. The memory
// layout is as follows:
//
//   Item             | Offset
//   -----------------+--------------------------------------
//   CacheIRStubInfo  | 0
//   CacheIR bytecode | sizeof(CacheIRStubInfo)
//   Stub field types | sizeof(CacheIRStubInfo) + codeLength_
//
// The array of stub field types is terminated by StubField::Type::Limit.
class CacheIRStubInfo {
  // Length in bytes of the trailing CacheIR bytecode.
  uint32_t codeLength_;
  CacheKind kind_;
  ICStubEngine engine_;
  // Offset from the stub object to its stub data; stored as uint8_t, so the
  // constructor asserts the value fits.
  uint8_t stubDataOffset_;
  bool makesGCCalls_;

  CacheIRStubInfo(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
                  uint32_t stubDataOffset, uint32_t codeLength)
      : codeLength_(codeLength),
        kind_(kind),
        engine_(engine),
        stubDataOffset_(stubDataOffset),
        makesGCCalls_(makesGCCalls) {
    // Guard against truncation in the narrow member representations.
    MOZ_ASSERT(kind_ == kind, "Kind must fit in bitfield");
    MOZ_ASSERT(engine_ == engine, "Engine must fit in bitfield");
    MOZ_ASSERT(stubDataOffset_ == stubDataOffset,
               "stubDataOffset must fit in uint8_t");
  }

  CacheIRStubInfo(const CacheIRStubInfo&) = delete;
  CacheIRStubInfo& operator=(const CacheIRStubInfo&) = delete;

 public:
  CacheKind kind() const { return kind_; }
  ICStubEngine engine() const { return engine_; }
  bool makesGCCalls() const { return makesGCCalls_; }

  // The CacheIR bytecode immediately follows this object (see the layout
  // diagram above).
  const uint8_t* code() const {
    return reinterpret_cast<const uint8_t*>(this) + sizeof(CacheIRStubInfo);
  }
  uint32_t codeLength() const { return codeLength_; }
  uint32_t stubDataOffset() const { return stubDataOffset_; }

  size_t stubDataSize() const;

  // Type of the i-th stub field; the type array follows the bytecode and is
  // terminated by StubField::Type::Limit.
  StubField::Type fieldType(uint32_t i) const {
    static_assert(sizeof(StubField::Type) == sizeof(uint8_t));
    const uint8_t* fieldTypes = code() + codeLength_;
    return static_cast<StubField::Type>(fieldTypes[i]);
  }

  static CacheIRStubInfo* New(CacheKind kind, ICStubEngine engine,
                              bool canMakeCalls, uint32_t stubDataOffset,
                              const CacheIRWriter& writer);

  // Typed access to a GC-thing stub field at |offset| within |stub|'s data.
  template <class Stub, StubField::Type type>
  typename MapStubFieldToType<type>::WrappedType& getStubField(
      Stub* stub, uint32_t offset) const;

  template <class Stub, class T>
  T* getPtrStubField(Stub* stub, uint32_t offset) const;

  template <StubField::Type type>
  typename MapStubFieldToType<type>::WrappedType& getStubField(
      ICCacheIRStub* stub, uint32_t offset) const {
    return getStubField<ICCacheIRStub, type>(stub, offset);
  }

  // Raw readers: |offset| is relative to the stub data and must be suitably
  // aligned (asserted below).
  uintptr_t getStubRawWord(const uint8_t* stubData, uint32_t offset) const {
    MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
    return *reinterpret_cast<const uintptr_t*>(stubData + offset);
  }

  uintptr_t getStubRawWord(ICCacheIRStub* stub, uint32_t offset) const {
    uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
    return getStubRawWord(stubData, offset);
  }

  int32_t getStubRawInt32(const uint8_t* stubData, uint32_t offset) const {
    MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(int32_t) == 0);
    return *reinterpret_cast<const int32_t*>(stubData + offset);
  }

  int32_t getStubRawInt32(ICCacheIRStub* stub, uint32_t offset) const {
    uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
    return getStubRawInt32(stubData, offset);
  }

  int64_t getStubRawInt64(const uint8_t* stubData, uint32_t offset) const {
    MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(int64_t) == 0);
    return *reinterpret_cast<const int64_t*>(stubData + offset);
  }

  int64_t getStubRawInt64(ICCacheIRStub* stub, uint32_t offset) const {
    uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
    return getStubRawInt64(stubData, offset);
  }

  // In-place updates of raw stub data; the old value is passed so the
  // implementation can verify what it overwrites.
  void replaceStubRawWord(uint8_t* stubData, uint32_t offset, uintptr_t oldWord,
                          uintptr_t newWord) const;

  void replaceStubRawValueBits(uint8_t* stubData, uint32_t offset,
                               uint64_t oldBits, uint64_t newBits) const;
};
   1438 
// Traces the GC things referenced from |stub|'s stub data, using the field
// type list in |stubInfo|.
template <typename T>
void TraceCacheIRStub(JSTracer* trc, T* stub, const CacheIRStubInfo* stubInfo);

// Traces the weak edges in |stub|'s stub data. NOTE(review): the bool result
// presumably reports whether the stub survived (no weak edge died) — confirm
// against the definition before relying on it.
template <typename T>
bool TraceWeakCacheIRStub(JSTracer* trc, T* stub,
                          const CacheIRStubInfo* stubInfo);
   1445 
   1446 }  // namespace jit
   1447 }  // namespace js
   1448 
   1449 #endif /* jit_CacheIRCompiler_h */