tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

CodeGenerator.cpp (805170B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/CodeGenerator.h"
      8 
      9 #include "mozilla/Assertions.h"
     10 #include "mozilla/CheckedArithmetic.h"
     11 #include "mozilla/DebugOnly.h"
     12 #include "mozilla/EndianUtils.h"
     13 #include "mozilla/EnumeratedArray.h"
     14 #include "mozilla/EnumeratedRange.h"
     15 #include "mozilla/EnumSet.h"
     16 #include "mozilla/IntegerTypeTraits.h"
     17 #include "mozilla/Latin1.h"
     18 #include "mozilla/MathAlgorithms.h"
     19 #include "mozilla/ScopeExit.h"
     20 #include "mozilla/SIMD.h"
     21 
     22 #include <algorithm>
     23 #include <cmath>
     24 #include <limits>
     25 #include <type_traits>
     26 #include <utility>
     27 
     28 #include "jslibmath.h"
     29 #include "jsmath.h"
     30 #include "jsnum.h"
     31 
     32 #include "builtin/MapObject.h"
     33 #include "builtin/RegExp.h"
     34 #include "builtin/String.h"
     35 #include "irregexp/RegExpTypes.h"
     36 #include "jit/ABIArgGenerator.h"
     37 #include "jit/CompileInfo.h"
     38 #include "jit/InlineScriptTree.h"
     39 #include "jit/Invalidation.h"
     40 #include "jit/IonGenericCallStub.h"
     41 #include "jit/IonIC.h"
     42 #include "jit/IonScript.h"
     43 #include "jit/JitcodeMap.h"
     44 #include "jit/JitFrames.h"
     45 #include "jit/JitRuntime.h"
     46 #include "jit/JitSpewer.h"
     47 #include "jit/JitZone.h"
     48 #include "jit/Linker.h"
     49 #include "jit/MIRGenerator.h"
     50 #include "jit/MoveEmitter.h"
     51 #include "jit/RangeAnalysis.h"
     52 #include "jit/RegExpStubConstants.h"
     53 #include "jit/SafepointIndex.h"
     54 #include "jit/SharedICHelpers.h"
     55 #include "jit/SharedICRegisters.h"
     56 #include "jit/VMFunctions.h"
     57 #include "jit/WarpSnapshot.h"
     58 #include "js/ColumnNumber.h"  // JS::LimitedColumnNumberOneOrigin
     59 #include "js/experimental/JitInfo.h"  // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
     60 #include "js/friend/DOMProxy.h"  // JS::ExpandoAndGeneration
     61 #include "js/RegExpFlags.h"      // JS::RegExpFlag
     62 #include "js/ScalarType.h"       // js::Scalar::Type
     63 #include "proxy/DOMProxy.h"
     64 #include "proxy/ScriptedProxyHandler.h"
     65 #include "util/DifferentialTesting.h"
     66 #include "util/Unicode.h"
     67 #include "vm/ArrayBufferViewObject.h"
     68 #include "vm/AsyncFunction.h"
     69 #include "vm/AsyncIteration.h"
     70 #include "vm/BuiltinObjectKind.h"
     71 #include "vm/FunctionFlags.h"  // js::FunctionFlags
     72 #include "vm/Interpreter.h"
     73 #include "vm/JSAtomUtils.h"  // AtomizeString
     74 #include "vm/MatchPairs.h"
     75 #include "vm/RegExpObject.h"
     76 #include "vm/RegExpStatics.h"
     77 #include "vm/RuntimeFuses.h"
     78 #include "vm/StaticStrings.h"
     79 #include "vm/StringObject.h"
     80 #include "vm/StringType.h"
     81 #include "vm/TypedArrayObject.h"
     82 #include "wasm/WasmCodegenConstants.h"
     83 #include "wasm/WasmPI.h"
     84 #include "wasm/WasmValType.h"
     85 #ifdef MOZ_VTUNE
     86 #  include "vtune/VTuneWrapper.h"
     87 #endif
     88 #include "wasm/WasmBinary.h"
     89 #include "wasm/WasmGC.h"
     90 #include "wasm/WasmGcObject.h"
     91 #include "wasm/WasmStubs.h"
     92 
     93 #include "builtin/Boolean-inl.h"
     94 #include "jit/MacroAssembler-inl.h"
     95 #include "jit/shared/CodeGenerator-shared-inl.h"
     96 #include "jit/TemplateObject-inl.h"
     97 #include "jit/VMFunctionList-inl.h"
     98 #include "vm/BytecodeUtil-inl.h"
     99 #include "vm/JSScript-inl.h"
    100 #include "wasm/WasmInstance-inl.h"
    101 
    102 using namespace js;
    103 using namespace js::jit;
    104 
    105 using mozilla::CheckedUint32;
    106 using mozilla::DebugOnly;
    107 using mozilla::FloatingPoint;
    108 using mozilla::NegativeInfinity;
    109 using mozilla::PositiveInfinity;
    110 
    111 using JS::ExpandoAndGeneration;
    112 
    113 namespace js {
    114 namespace jit {
    115 
    116 #ifdef CHECK_OSIPOINT_REGISTERS
    117 template <class Op>
    118 static void HandleRegisterDump(Op op, MacroAssembler& masm,
    119                               LiveRegisterSet liveRegs, Register activation,
    120                               Register scratch) {
    121  const size_t baseOffset = JitActivation::offsetOfRegs();
    122 
    123  // Handle live GPRs.
    124  for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
    125    Register reg = *iter;
    126    Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
    127 
    128    if (reg == activation) {
    129      // To use the original value of the activation register (that's
    130      // now on top of the stack), we need the scratch register.
    131      masm.push(scratch);
    132      masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
    133      op(scratch, dump);
    134      masm.pop(scratch);
    135    } else {
    136      op(reg, dump);
    137    }
    138  }
    139 
    140  // Handle live FPRs.
    141  for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
    142    FloatRegister reg = *iter;
    143    Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
    144    op(reg, dump);
    145  }
    146 }
    147 
    148 class StoreOp {
    149  MacroAssembler& masm;
    150 
    151 public:
    152  explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
    153 
    154  void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
    155  void operator()(FloatRegister reg, Address dump) {
    156    if (reg.isDouble()) {
    157      masm.storeDouble(reg, dump);
    158    } else if (reg.isSingle()) {
    159      masm.storeFloat32(reg, dump);
    160    } else if (reg.isSimd128()) {
    161      MOZ_CRASH("Unexpected case for SIMD");
    162    } else {
    163      MOZ_CRASH("Unexpected register type.");
    164    }
    165  }
    166 };
    167 
    168 class VerifyOp {
    169  MacroAssembler& masm;
    170  Label* failure_;
    171 
    172 public:
    173  VerifyOp(MacroAssembler& masm, Label* failure)
    174      : masm(masm), failure_(failure) {}
    175 
    176  void operator()(Register reg, Address dump) {
    177    masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
    178  }
    179  void operator()(FloatRegister reg, Address dump) {
    180    if (reg.isDouble()) {
    181      ScratchDoubleScope scratch(masm);
    182      masm.loadDouble(dump, scratch);
    183      masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
    184    } else if (reg.isSingle()) {
    185      ScratchFloat32Scope scratch(masm);
    186      masm.loadFloat32(dump, scratch);
    187      masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
    188    } else if (reg.isSimd128()) {
    189      MOZ_CRASH("Unexpected case for SIMD");
    190    } else {
    191      MOZ_CRASH("Unexpected register type.");
    192    }
    193  }
    194 };
    195 
// Emit code that verifies, at runtime, that no live register captured by
// |safepoint| was modified between the preceding VM call (which spilled
// them via StoreAllLiveRegs) and this OsiPoint.
void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
  // Ensure the live registers stored by callVM did not change between
  // the call and this OsiPoint. Try-catch relies on this invariant.

  // Load pointer to the JitActivation in a scratch register.
  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
  Register scratch = allRegs.takeAny();
  masm.push(scratch);
  masm.loadJitActivation(scratch);

  // If we should not check registers (because the instruction did not call
  // into the VM, or a GC happened), we're done.
  Label failure, done;
  Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
  masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);

  // Having more than one VM function call made in one visit function at
  // runtime is a security-critical error, because if we conservatively
  // assume that one of the function calls can re-enter Ion, then the
  // invalidation process will potentially add a call at a random location,
  // by patching the code before the return address.
  masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);

  // Set checkRegs to 0, so that we don't try to verify registers after we
  // return from this script to the caller.
  masm.store32(Imm32(0), checkRegs);

  // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
  // temps after calling into the VM. This is fine because no other
  // instructions (including this OsiPoint) will depend on them. Also
  // backtracking can also use the same register for an input and an output.
  // These are marked as clobbered and shouldn't get checked.
  LiveRegisterSet liveRegs;
  liveRegs.set() = RegisterSet::Intersect(
      safepoint->liveRegs().set(),
      RegisterSet::Not(safepoint->clobberedRegs().set()));

  // Compare every remaining live register against its dumped value,
  // branching to |failure| on any difference.
  VerifyOp op(masm, &failure);
  HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());

  masm.jump(&done);

  // Do not profile the callWithABI that occurs below.  This is to avoid a
  // rare corner case that occurs when profiling interacts with itself:
  //
  // When slow profiling assertions are turned on, FunctionBoundary ops
  // (which update the profiler pseudo-stack) may emit a callVM, which
  // forces them to have an osi point associated with them.  The
  // FunctionBoundary for inline function entry is added to the caller's
  // graph with a PC from the caller's code, but during codegen it modifies
  // Gecko Profiler instrumentation to add the callee as the current top-most
  // script. When codegen gets to the OSIPoint, and the callWithABI below is
  // emitted, the codegen thinks that the current frame is the callee, but
  // the PC it's using from the OSIPoint refers to the caller.  This causes
  // the profiler instrumentation of the callWithABI below to ASSERT, since
  // the script and pc are mismatched.  To avoid this, we simply omit
  // instrumentation for these callWithABIs.

  // Any live register captured by a safepoint (other than temp registers)
  // must remain unchanged between the call and the OsiPoint instruction.
  masm.bind(&failure);
  masm.assumeUnreachable("Modified registers between VM call and OsiPoint");

  masm.bind(&done);
  masm.pop(scratch);
}
    262 
    263 bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
    264  if (!checkOsiPointRegisters) {
    265    return false;
    266  }
    267 
    268  if (safepoint->liveRegs().emptyGeneral() &&
    269      safepoint->liveRegs().emptyFloat()) {
    270    return false;  // No registers to check.
    271  }
    272 
    273  return true;
    274 }
    275 
    276 void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
    277  if (!shouldVerifyOsiPointRegs(safepoint)) {
    278    return;
    279  }
    280 
    281  // Set checkRegs to 0. If we perform a VM call, the instruction
    282  // will set it to 1.
    283  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    284  Register scratch = allRegs.takeAny();
    285  masm.push(scratch);
    286  masm.loadJitActivation(scratch);
    287  Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    288  masm.store32(Imm32(0), checkRegs);
    289  masm.pop(scratch);
    290 }
    291 
    292 static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
    293  // Store a copy of all live registers before performing the call.
    294  // When we reach the OsiPoint, we can use this to check nothing
    295  // modified them in the meantime.
    296 
    297  // Load pointer to the JitActivation in a scratch register.
    298  AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    299  Register scratch = allRegs.takeAny();
    300  masm.push(scratch);
    301  masm.loadJitActivation(scratch);
    302 
    303  Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    304  masm.add32(Imm32(1), checkRegs);
    305 
    306  StoreOp op(masm);
    307  HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
    308 
    309  masm.pop(scratch);
    310 }
    311 #endif  // CHECK_OSIPOINT_REGISTERS
    312 
    313 // Before doing any call to Cpp, you should ensure that volatile
    314 // registers are evicted by the register allocator.
// Emit a call to the VM wrapper for |id| on behalf of LIR instruction
// |ins|. Expects the instruction's explicit arguments to have already been
// pushed (see pushArg / ArgSeq::generate); pushes the exit frame
// descriptor, performs the call, and pops the arguments and the exit frame
// remainder afterwards.
void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
  TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
  const VMFunctionData& fun = GetVMFunction(id);

  // Stack is:
  //    ... frame ...
  //    [args]
#ifdef DEBUG
  // All explicit arguments must have been pushed by the caller.
  MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
  pushedArgs_ = 0;
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  // Snapshot the live registers so the following OsiPoint can verify they
  // are unchanged after the call.
  if (shouldVerifyOsiPointRegs(ins->safepoint())) {
    StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
  }
#endif

#ifdef DEBUG
  if (ins->mirRaw()) {
    MOZ_ASSERT(ins->mirRaw()->isInstruction());
    MInstruction* mir = ins->mirRaw()->toInstruction();
    MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());

    // If this MIR instruction has an overridden AliasSet, set the JitRuntime's
    // disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
    // RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
    // interrupt callbacks can call JS (chrome JS or shell testing functions).
    bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
    if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
      const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
      masm.move32(Imm32(1), ReturnReg);
      masm.store32(ReturnReg, AbsoluteAddress(addr));
    }
  }
#endif

  // Push an exit frame descriptor.
  masm.Push(FrameDescriptor(FrameType::IonJS));

  // Call the wrapper function.  The wrapper is in charge to unwind the stack
  // when returning from the call.  Failures are handled with exceptions based
  // on the return value of the C functions.  To guard the outcome of the
  // returned value, use another LIR instruction.
  ensureOsiSpace();
  uint32_t callOffset = masm.callJit(code);
  markSafepointAt(callOffset, ins);

#ifdef DEBUG
  // Reset the disallowArbitraryCode flag after the call. ReturnReg is
  // preserved around the store since it holds the call's result.
  {
    const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
    masm.push(ReturnReg);
    masm.move32(Imm32(0), ReturnReg);
    masm.store32(ReturnReg, AbsoluteAddress(addr));
    masm.pop(ReturnReg);
  }
#endif

  // Pop rest of the exit frame and the arguments left on the stack.
  int framePop =
      sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
  masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);

  // Stack is:
  //    ... frame ...
}
    382 
    383 template <typename Fn, Fn fn>
    384 void CodeGenerator::callVM(LInstruction* ins) {
    385  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
    386  callVMInternal(id, ins);
    387 }
    388 
    389 // ArgSeq store arguments for OutOfLineCallVM.
    390 //
    391 // OutOfLineCallVM are created with "oolCallVM" function. The third argument of
    392 // this function is an instance of a class which provides a "generate" in charge
    393 // of pushing the argument, with "pushArg", for a VMFunction.
    394 //
    395 // Such list of arguments can be created by using the "ArgList" function which
    396 // creates one instance of "ArgSeq", where the type of the arguments are
    397 // inferred from the type of the arguments.
    398 //
    399 // The list of arguments must be written in the same order as if you were
    400 // calling the function in C++.
    401 //
    402 // Example:
    403 //   ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
    404 
// Holds a tuple of VM-call arguments (references stripped, so values are
// stored by copy) and pushes them via CodeGenerator::pushArg when asked.
template <typename... ArgTypes>
class ArgSeq {
  std::tuple<std::remove_reference_t<ArgTypes>...> args_;

  // Push each stored argument; the index arithmetic walks the tuple from
  // the last element down to the first.
  template <std::size_t... ISeq>
  inline void generate(CodeGenerator* codegen,
                       std::index_sequence<ISeq...>) const {
    // Arguments are pushed in reverse order, from last argument to first
    // argument.
    (codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
  }

 public:
  explicit ArgSeq(ArgTypes&&... args)
      : args_(std::forward<ArgTypes>(args)...) {}

  // Public entry point: expands an index sequence over the stored pack and
  // delegates to the private overload above.
  inline void generate(CodeGenerator* codegen) const {
    generate(codegen, std::index_sequence_for<ArgTypes...>{});
  }

#ifdef DEBUG
  // Number of stored arguments; checked against the VMFunction's
  // explicitArgs in oolCallVM.
  static constexpr size_t numArgs = sizeof...(ArgTypes);
#endif
};
    429 
// Convenience factory that deduces the argument pack and builds the
// corresponding ArgSeq; see the comment block above ArgSeq for usage.
template <typename... ArgTypes>
inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
  return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
}
    434 
    435 // Store wrappers, to generate the right move of data after the VM call.
    436 
// Store wrapper used when the VM call's result (if any) is ignored: stores
// nothing and reports no clobbered registers.
struct StoreNothing {
  inline void generate(CodeGenerator* codegen) const {}
  inline LiveRegisterSet clobbered() const {
    return LiveRegisterSet();  // No register gets clobbered
  }
};
    443 
    444 class StoreRegisterTo {
    445 private:
    446  Register out_;
    447 
    448 public:
    449  explicit StoreRegisterTo(Register out) : out_(out) {}
    450 
    451  inline void generate(CodeGenerator* codegen) const {
    452    // It's okay to use storePointerResultTo here - the VMFunction wrapper
    453    // ensures the upper bytes are zero for bool/int32 return values.
    454    codegen->storePointerResultTo(out_);
    455  }
    456  inline LiveRegisterSet clobbered() const {
    457    LiveRegisterSet set;
    458    set.add(out_);
    459    return set;
    460  }
    461 };
    462 
    463 class StoreFloatRegisterTo {
    464 private:
    465  FloatRegister out_;
    466 
    467 public:
    468  explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
    469 
    470  inline void generate(CodeGenerator* codegen) const {
    471    codegen->storeFloatResultTo(out_);
    472  }
    473  inline LiveRegisterSet clobbered() const {
    474    LiveRegisterSet set;
    475    set.add(out_);
    476    return set;
    477  }
    478 };
    479 
    480 template <typename Output>
    481 class StoreValueTo_ {
    482 private:
    483  Output out_;
    484 
    485 public:
    486  explicit StoreValueTo_(const Output& out) : out_(out) {}
    487 
    488  inline void generate(CodeGenerator* codegen) const {
    489    codegen->storeResultValueTo(out_);
    490  }
    491  inline LiveRegisterSet clobbered() const {
    492    LiveRegisterSet set;
    493    set.add(out_);
    494    return set;
    495  }
    496 };
    497 
// Convenience factory that deduces |Output| when creating a StoreValueTo_.
template <typename Output>
StoreValueTo_<Output> StoreValueTo(const Output& out) {
  return StoreValueTo_<Output>(out);
}
    502 
// Out-of-line code path that pushes a stored argument list, performs a VM
// call for |lir|, and stores the result via the given store wrapper. See
// visitOutOfLineCallVM for the code it generates.
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
 private:
  LInstruction* lir_;  // Instruction this VM call belongs to.
  ArgSeq args_;        // Arguments to push before the call.
  StoreOutputTo out_;  // How to store the call's result.

 public:
  OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
                  const StoreOutputTo& out)
      : lir_(lir), args_(args), out_(out) {}

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineCallVM(this);
  }

  LInstruction* lir() const { return lir_; }
  const ArgSeq& args() const { return args_; }
  const StoreOutputTo& out() const { return out_; }
};
    523 
// Allocate and register an out-of-line VM call for |lir| with the given
// argument list and output store wrapper; returns the OutOfLineCode whose
// entry/rejoin labels the caller jumps through.
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
                                        const StoreOutputTo& out) {
  MOZ_ASSERT(lir->mirRaw());
  MOZ_ASSERT(lir->mirRaw()->isInstruction());

#ifdef DEBUG
  // Cross-check the argument count and output kind against the VMFunction's
  // signature: a data-returning function must have a real store wrapper,
  // and vice versa.
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  const VMFunctionData& fun = GetVMFunction(id);
  MOZ_ASSERT(fun.explicitArgs == args.numArgs);
  MOZ_ASSERT(fun.returnsData() !=
             (std::is_same_v<StoreOutputTo, StoreNothing>));
#endif

  OutOfLineCode* ool = new (alloc())
      OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
  addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
  return ool;
}
    543 
// Generate the out-of-line VM call: spew/perf bookkeeping, save live
// registers (for non-call instructions), push arguments, call, store the
// result, restore registers, and jump back to the rejoin point.
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
void CodeGenerator::visitOutOfLineCallVM(
    OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
  LInstruction* lir = ool->lir();

#ifdef JS_JITSPEW
  JitSpewStart(JitSpew_Codegen, "                                # LIR=%s",
               lir->opName());
  if (const char* extra = lir->getExtraName()) {
    JitSpewCont(JitSpew_Codegen, ":%s", extra);
  }
  JitSpewFin(JitSpew_Codegen);
#endif
  perfSpewer().recordInstruction(masm, lir);
  // Only non-call instructions save/restore live registers around the VM
  // call.
  if (!lir->isCall()) {
    saveLive(lir);
  }
  ool->args().generate(this);
  callVM<Fn, fn>(lir);
  ool->out().generate(this);
  if (!lir->isCall()) {
    // Skip restoring the registers the output was just stored into.
    restoreLiveIgnore(lir, ool->out().clobbered());
  }
  masm.jump(ool->rejoin());
}
    569 
// Out-of-line fallback path for an IonIC, generated by
// CodeGenerator::visitOutOfLineICFallback; it calls the IC kind's update
// function in the VM.
class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
 private:
  LInstruction* lir_;
  size_t cacheIndex_;      // Identifies the IonIC (see DataPtr<IonIC>).
  size_t cacheInfoIndex_;  // Index into icInfo_ for patchable push offsets.

 public:
  OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
                      size_t cacheInfoIndex)
      : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}

  // Intentionally empty override:
  // The binding of the initial jump is done in
  // CodeGenerator::visitOutOfLineICFallback.
  void bind(MacroAssembler* masm) override {}

  size_t cacheIndex() const { return cacheIndex_; }
  size_t cacheInfoIndex() const { return cacheInfoIndex_; }
  LInstruction* lir() const { return lir_; }

  void accept(CodeGenerator* codegen) override {
    codegen->visitOutOfLineICFallback(this);
  }
};
    594 
// Emit the patchable entry jump for the IonIC at |cacheIndex| and register
// its out-of-line fallback path.
void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
  // A cacheIndex of SIZE_MAX signals that allocating the IC failed; record
  // OOM on the assembler and bail out.
  if (cacheIndex == SIZE_MAX) {
    masm.setOOM();
    return;
  }

  DataPtr<IonIC> cache(this, cacheIndex);
  MInstruction* mir = lir->mirRaw()->toInstruction();
  cache->setScriptedLocation(mir->block()->info().script(),
                             mir->resumePoint()->pc());

  // Emit a mov of a placeholder (-1) into the IC's scratch register,
  // followed by an indirect jump through it. The mov's offset is recorded
  // in icInfo_ so the placeholder can be patched later.
  Register temp = cache->scratchRegisterForEntryJump();
  icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
  masm.jump(Address(temp, 0));

  MOZ_ASSERT(!icInfo_.empty());

  OutOfLineICFallback* ool =
      new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
  addOutOfLineCode(ool, mir);

  // The rejoin label immediately follows the entry jump; tell the IC where
  // to resume.
  masm.bind(ool->rejoin());
  cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
}
    619 
    620 void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
    621  LInstruction* lir = ool->lir();
    622  size_t cacheIndex = ool->cacheIndex();
    623  size_t cacheInfoIndex = ool->cacheInfoIndex();
    624 
    625  DataPtr<IonIC> ic(this, cacheIndex);
    626 
    627  // Register the location of the OOL path in the IC.
    628  ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
    629 
    630  switch (ic->kind()) {
    631    case CacheKind::GetProp:
    632    case CacheKind::GetElem: {
    633      IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
    634 
    635      saveLive(lir);
    636 
    637      pushArg(getPropIC->id());
    638      pushArg(getPropIC->value());
    639      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    640      pushArg(ImmGCPtr(gen->outerInfo().script()));
    641 
    642      using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
    643                          HandleValue, HandleValue, MutableHandleValue);
    644      callVM<Fn, IonGetPropertyIC::update>(lir);
    645 
    646      StoreValueTo(getPropIC->output()).generate(this);
    647      restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
    648 
    649      masm.jump(ool->rejoin());
    650      return;
    651    }
    652    case CacheKind::GetPropSuper:
    653    case CacheKind::GetElemSuper: {
    654      IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
    655 
    656      saveLive(lir);
    657 
    658      pushArg(getPropSuperIC->id());
    659      pushArg(getPropSuperIC->receiver());
    660      pushArg(getPropSuperIC->object());
    661      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    662      pushArg(ImmGCPtr(gen->outerInfo().script()));
    663 
    664      using Fn =
    665          bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
    666                   HandleValue, HandleValue, MutableHandleValue);
    667      callVM<Fn, IonGetPropSuperIC::update>(lir);
    668 
    669      StoreValueTo(getPropSuperIC->output()).generate(this);
    670      restoreLiveIgnore(lir,
    671                        StoreValueTo(getPropSuperIC->output()).clobbered());
    672 
    673      masm.jump(ool->rejoin());
    674      return;
    675    }
    676    case CacheKind::SetProp:
    677    case CacheKind::SetElem: {
    678      IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
    679 
    680      saveLive(lir);
    681 
    682      pushArg(setPropIC->rhs());
    683      pushArg(setPropIC->id());
    684      pushArg(setPropIC->object());
    685      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    686      pushArg(ImmGCPtr(gen->outerInfo().script()));
    687 
    688      using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
    689                          HandleObject, HandleValue, HandleValue);
    690      callVM<Fn, IonSetPropertyIC::update>(lir);
    691 
    692      restoreLive(lir);
    693 
    694      masm.jump(ool->rejoin());
    695      return;
    696    }
    697    case CacheKind::GetName: {
    698      IonGetNameIC* getNameIC = ic->asGetNameIC();
    699 
    700      saveLive(lir);
    701 
    702      pushArg(getNameIC->environment());
    703      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    704      pushArg(ImmGCPtr(gen->outerInfo().script()));
    705 
    706      using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
    707                          MutableHandleValue);
    708      callVM<Fn, IonGetNameIC::update>(lir);
    709 
    710      StoreValueTo(getNameIC->output()).generate(this);
    711      restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
    712 
    713      masm.jump(ool->rejoin());
    714      return;
    715    }
    716    case CacheKind::BindName: {
    717      IonBindNameIC* bindNameIC = ic->asBindNameIC();
    718 
    719      saveLive(lir);
    720 
    721      pushArg(bindNameIC->environment());
    722      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    723      pushArg(ImmGCPtr(gen->outerInfo().script()));
    724 
    725      using Fn =
    726          JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
    727      callVM<Fn, IonBindNameIC::update>(lir);
    728 
    729      StoreRegisterTo(bindNameIC->output()).generate(this);
    730      restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
    731 
    732      masm.jump(ool->rejoin());
    733      return;
    734    }
    735    case CacheKind::GetIterator: {
    736      IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
    737 
    738      saveLive(lir);
    739 
    740      pushArg(getIteratorIC->value());
    741      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    742      pushArg(ImmGCPtr(gen->outerInfo().script()));
    743 
    744      using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
    745                               HandleValue);
    746      callVM<Fn, IonGetIteratorIC::update>(lir);
    747 
    748      StoreRegisterTo(getIteratorIC->output()).generate(this);
    749      restoreLiveIgnore(lir,
    750                        StoreRegisterTo(getIteratorIC->output()).clobbered());
    751 
    752      masm.jump(ool->rejoin());
    753      return;
    754    }
    755    case CacheKind::OptimizeSpreadCall: {
    756      auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
    757 
    758      saveLive(lir);
    759 
    760      pushArg(optimizeSpreadCallIC->value());
    761      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    762      pushArg(ImmGCPtr(gen->outerInfo().script()));
    763 
    764      using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
    765                          HandleValue, MutableHandleValue);
    766      callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
    767 
    768      StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
    769      restoreLiveIgnore(
    770          lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
    771 
    772      masm.jump(ool->rejoin());
    773      return;
    774    }
    775    case CacheKind::In: {
    776      IonInIC* inIC = ic->asInIC();
    777 
    778      saveLive(lir);
    779 
    780      pushArg(inIC->object());
    781      pushArg(inIC->key());
    782      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    783      pushArg(ImmGCPtr(gen->outerInfo().script()));
    784 
    785      using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
    786                          HandleObject, bool*);
    787      callVM<Fn, IonInIC::update>(lir);
    788 
    789      StoreRegisterTo(inIC->output()).generate(this);
    790      restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
    791 
    792      masm.jump(ool->rejoin());
    793      return;
    794    }
    795    case CacheKind::HasOwn: {
    796      IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
    797 
    798      saveLive(lir);
    799 
    800      pushArg(hasOwnIC->id());
    801      pushArg(hasOwnIC->value());
    802      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    803      pushArg(ImmGCPtr(gen->outerInfo().script()));
    804 
    805      using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
    806                          HandleValue, int32_t*);
    807      callVM<Fn, IonHasOwnIC::update>(lir);
    808 
    809      StoreRegisterTo(hasOwnIC->output()).generate(this);
    810      restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
    811 
    812      masm.jump(ool->rejoin());
    813      return;
    814    }
    815    case CacheKind::CheckPrivateField: {
    816      IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
    817 
    818      saveLive(lir);
    819 
    820      pushArg(checkPrivateFieldIC->id());
    821      pushArg(checkPrivateFieldIC->value());
    822 
    823      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    824      pushArg(ImmGCPtr(gen->outerInfo().script()));
    825 
    826      using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
    827                          HandleValue, HandleValue, bool*);
    828      callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
    829 
    830      StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
    831      restoreLiveIgnore(
    832          lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
    833 
    834      masm.jump(ool->rejoin());
    835      return;
    836    }
    837    case CacheKind::InstanceOf: {
    838      IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
    839 
    840      saveLive(lir);
    841 
    842      pushArg(hasInstanceOfIC->rhs());
    843      pushArg(hasInstanceOfIC->lhs());
    844      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    845      pushArg(ImmGCPtr(gen->outerInfo().script()));
    846 
    847      using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
    848                          HandleValue lhs, HandleObject rhs, bool* res);
    849      callVM<Fn, IonInstanceOfIC::update>(lir);
    850 
    851      StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
    852      restoreLiveIgnore(lir,
    853                        StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
    854 
    855      masm.jump(ool->rejoin());
    856      return;
    857    }
    858    case CacheKind::UnaryArith: {
    859      IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
    860 
    861      saveLive(lir);
    862 
    863      pushArg(unaryArithIC->input());
    864      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    865      pushArg(ImmGCPtr(gen->outerInfo().script()));
    866 
    867      using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
    868                          IonUnaryArithIC* stub, HandleValue val,
    869                          MutableHandleValue res);
    870      callVM<Fn, IonUnaryArithIC::update>(lir);
    871 
    872      StoreValueTo(unaryArithIC->output()).generate(this);
    873      restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
    874 
    875      masm.jump(ool->rejoin());
    876      return;
    877    }
    878    case CacheKind::ToPropertyKey: {
    879      IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
    880 
    881      saveLive(lir);
    882 
    883      pushArg(toPropertyKeyIC->input());
    884      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    885      pushArg(ImmGCPtr(gen->outerInfo().script()));
    886 
    887      using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
    888                          IonToPropertyKeyIC* ic, HandleValue val,
    889                          MutableHandleValue res);
    890      callVM<Fn, IonToPropertyKeyIC::update>(lir);
    891 
    892      StoreValueTo(toPropertyKeyIC->output()).generate(this);
    893      restoreLiveIgnore(lir,
    894                        StoreValueTo(toPropertyKeyIC->output()).clobbered());
    895 
    896      masm.jump(ool->rejoin());
    897      return;
    898    }
    899    case CacheKind::BinaryArith: {
    900      IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
    901 
    902      saveLive(lir);
    903 
    904      pushArg(binaryArithIC->rhs());
    905      pushArg(binaryArithIC->lhs());
    906      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    907      pushArg(ImmGCPtr(gen->outerInfo().script()));
    908 
    909      using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
    910                          IonBinaryArithIC* stub, HandleValue lhs,
    911                          HandleValue rhs, MutableHandleValue res);
    912      callVM<Fn, IonBinaryArithIC::update>(lir);
    913 
    914      StoreValueTo(binaryArithIC->output()).generate(this);
    915      restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
    916 
    917      masm.jump(ool->rejoin());
    918      return;
    919    }
    920    case CacheKind::Compare: {
    921      IonCompareIC* compareIC = ic->asCompareIC();
    922 
    923      saveLive(lir);
    924 
    925      pushArg(compareIC->rhs());
    926      pushArg(compareIC->lhs());
    927      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    928      pushArg(ImmGCPtr(gen->outerInfo().script()));
    929 
    930      using Fn =
    931          bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
    932                   HandleValue lhs, HandleValue rhs, bool* res);
    933      callVM<Fn, IonCompareIC::update>(lir);
    934 
    935      StoreRegisterTo(compareIC->output()).generate(this);
    936      restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
    937 
    938      masm.jump(ool->rejoin());
    939      return;
    940    }
    941    case CacheKind::CloseIter: {
    942      IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
    943 
    944      saveLive(lir);
    945 
    946      pushArg(closeIterIC->iter());
    947      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    948      pushArg(ImmGCPtr(gen->outerInfo().script()));
    949 
    950      using Fn =
    951          bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
    952      callVM<Fn, IonCloseIterIC::update>(lir);
    953 
    954      restoreLive(lir);
    955 
    956      masm.jump(ool->rejoin());
    957      return;
    958    }
    959    case CacheKind::OptimizeGetIterator: {
    960      auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();
    961 
    962      saveLive(lir);
    963 
    964      pushArg(optimizeGetIteratorIC->value());
    965      icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
    966      pushArg(ImmGCPtr(gen->outerInfo().script()));
    967 
    968      using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
    969                          HandleValue, bool* res);
    970      callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);
    971 
    972      StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
    973      restoreLiveIgnore(
    974          lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());
    975 
    976      masm.jump(ool->rejoin());
    977      return;
    978    }
    979    case CacheKind::Call:
    980    case CacheKind::TypeOf:
    981    case CacheKind::TypeOfEq:
    982    case CacheKind::ToBool:
    983    case CacheKind::LazyConstant:
    984    case CacheKind::NewArray:
    985    case CacheKind::NewObject:
    986    case CacheKind::Lambda:
    987    case CacheKind::GetImport:
    988      MOZ_CRASH("Unsupported IC");
    989  }
    990  MOZ_CRASH();
    991 }
    992 
// Return this MIR node's template object. as<StringObject>() asserts (in
// debug builds) that the stored object really is a StringObject.
StringObject* MNewStringObject::templateObj() const {
  return &templateObj_->as<StringObject>();
}
    996 
// Construct a code generator for |graph|. Only wires up the allocator-backed
// label side tables here; code is emitted by later generate* calls.
// |scriptCounts_| stays null until profiling information is attached.
CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
                             MacroAssembler* masm,
                             const wasm::CodeMetadata* wasmCodeMeta)
    : CodeGeneratorSpecific(gen, graph, masm, wasmCodeMeta),
      ionScriptLabels_(gen->alloc()),
      nurseryObjectLabels_(gen->alloc()),
      nurseryValueLabels_(gen->alloc()),
      scriptCounts_(nullptr) {}
   1005 
   1006 CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
   1007 
   1008 void CodeGenerator::visitValueToNumberInt32(LValueToNumberInt32* lir) {
   1009  ValueOperand operand = ToValue(lir->input());
   1010  Register output = ToRegister(lir->output());
   1011  FloatRegister temp = ToFloatRegister(lir->temp0());
   1012 
   1013  Label fails;
   1014  masm.convertValueToInt32(operand, temp, output, &fails,
   1015                           lir->mir()->needsNegativeZeroCheck(),
   1016                           lir->mir()->conversion());
   1017 
   1018  bailoutFrom(&fails, lir->snapshot());
   1019 }
   1020 
   1021 void CodeGenerator::visitValueTruncateToInt32(LValueTruncateToInt32* lir) {
   1022  ValueOperand operand = ToValue(lir->input());
   1023  Register output = ToRegister(lir->output());
   1024  FloatRegister temp = ToFloatRegister(lir->temp0());
   1025  Register stringReg = ToRegister(lir->temp1());
   1026 
   1027  auto* oolDouble = oolTruncateDouble(temp, output, lir->mir());
   1028 
   1029  using Fn = bool (*)(JSContext*, JSString*, double*);
   1030  auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
   1031                                                  StoreFloatRegisterTo(temp));
   1032  Label* stringEntry = oolString->entry();
   1033  Label* stringRejoin = oolString->rejoin();
   1034 
   1035  Label fails;
   1036  masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
   1037                            oolDouble->entry(), stringReg, temp, output,
   1038                            &fails);
   1039  masm.bind(oolDouble->rejoin());
   1040 
   1041  bailoutFrom(&fails, lir->snapshot());
   1042 }
   1043 
   1044 void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
   1045  ValueOperand operand = ToValue(lir->input());
   1046  FloatRegister output = ToFloatRegister(lir->output());
   1047 
   1048  Label fail;
   1049  masm.convertValueToDouble(operand, output, &fail);
   1050  bailoutFrom(&fail, lir->snapshot());
   1051 }
   1052 
   1053 void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
   1054  ValueOperand operand = ToValue(lir->input());
   1055  FloatRegister output = ToFloatRegister(lir->output());
   1056 
   1057  Label fail;
   1058  masm.convertValueToFloat32(operand, output, &fail);
   1059  bailoutFrom(&fail, lir->snapshot());
   1060 }
   1061 
   1062 void CodeGenerator::visitValueToFloat16(LValueToFloat16* lir) {
   1063  ValueOperand operand = ToValue(lir->input());
   1064  Register temp = ToTempRegisterOrInvalid(lir->temp0());
   1065  FloatRegister output = ToFloatRegister(lir->output());
   1066 
   1067  LiveRegisterSet volatileRegs;
   1068  if (!MacroAssembler::SupportsFloat64To16()) {
   1069    volatileRegs = liveVolatileRegs(lir);
   1070  }
   1071 
   1072  Label fail;
   1073  masm.convertValueToFloat16(operand, output, temp, volatileRegs, &fail);
   1074  bailoutFrom(&fail, lir->snapshot());
   1075 }
   1076 
   1077 void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
   1078  ValueOperand operand = ToValue(lir->input());
   1079  Register output = ToRegister(lir->output());
   1080 
   1081  using Fn = BigInt* (*)(JSContext*, HandleValue);
   1082  auto* ool =
   1083      oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));
   1084 
   1085  Register tag = masm.extractTag(operand, output);
   1086 
   1087  Label notBigInt, done;
   1088  masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
   1089  masm.unboxBigInt(operand, output);
   1090  masm.jump(&done);
   1091  masm.bind(&notBigInt);
   1092 
   1093  masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
   1094  masm.branchTestString(Assembler::Equal, tag, ool->entry());
   1095 
   1096  // ToBigInt(object) can have side-effects; all other types throw a TypeError.
   1097  bailout(lir->snapshot());
   1098 
   1099  masm.bind(ool->rejoin());
   1100  masm.bind(&done);
   1101 }
   1102 
   1103 void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
   1104  masm.convertInt32ToDouble(ToRegister(lir->input()),
   1105                            ToFloatRegister(lir->output()));
   1106 }
   1107 
   1108 void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
   1109  masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
   1110                              ToFloatRegister(lir->output()));
   1111 }
   1112 
   1113 void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
   1114  masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
   1115                              ToFloatRegister(lir->output()));
   1116 }
   1117 
   1118 void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
   1119  masm.convertInt32ToFloat32(ToRegister(lir->input()),
   1120                             ToFloatRegister(lir->output()));
   1121 }
   1122 
   1123 void CodeGenerator::visitDoubleToFloat16(LDoubleToFloat16* lir) {
   1124  LiveRegisterSet volatileRegs;
   1125  if (!MacroAssembler::SupportsFloat64To16()) {
   1126    volatileRegs = liveVolatileRegs(lir);
   1127  }
   1128  masm.convertDoubleToFloat16(
   1129      ToFloatRegister(lir->input()), ToFloatRegister(lir->output()),
   1130      ToTempRegisterOrInvalid(lir->temp0()), volatileRegs);
   1131 }
   1132 
   1133 void CodeGenerator::visitDoubleToFloat32ToFloat16(
   1134    LDoubleToFloat32ToFloat16* lir) {
   1135  masm.convertDoubleToFloat16(
   1136      ToFloatRegister(lir->input()), ToFloatRegister(lir->output()),
   1137      ToRegister(lir->temp0()), ToRegister(lir->temp1()));
   1138 }
   1139 
   1140 void CodeGenerator::visitFloat32ToFloat16(LFloat32ToFloat16* lir) {
   1141  LiveRegisterSet volatileRegs;
   1142  if (!MacroAssembler::SupportsFloat32To16()) {
   1143    volatileRegs = liveVolatileRegs(lir);
   1144  }
   1145  masm.convertFloat32ToFloat16(
   1146      ToFloatRegister(lir->input()), ToFloatRegister(lir->output()),
   1147      ToTempRegisterOrInvalid(lir->temp0()), volatileRegs);
   1148 }
   1149 
   1150 void CodeGenerator::visitInt32ToFloat16(LInt32ToFloat16* lir) {
   1151  LiveRegisterSet volatileRegs;
   1152  if (!MacroAssembler::SupportsFloat32To16()) {
   1153    volatileRegs = liveVolatileRegs(lir);
   1154  }
   1155  masm.convertInt32ToFloat16(
   1156      ToRegister(lir->input()), ToFloatRegister(lir->output()),
   1157      ToTempRegisterOrInvalid(lir->temp0()), volatileRegs);
   1158 }
   1159 
   1160 void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
   1161  Label fail;
   1162  FloatRegister input = ToFloatRegister(lir->input());
   1163  Register output = ToRegister(lir->output());
   1164  masm.convertDoubleToInt32(input, output, &fail,
   1165                            lir->mir()->needsNegativeZeroCheck());
   1166  bailoutFrom(&fail, lir->snapshot());
   1167 }
   1168 
   1169 void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
   1170  Label fail;
   1171  FloatRegister input = ToFloatRegister(lir->input());
   1172  Register output = ToRegister(lir->output());
   1173  masm.convertFloat32ToInt32(input, output, &fail,
   1174                             lir->mir()->needsNegativeZeroCheck());
   1175  bailoutFrom(&fail, lir->snapshot());
   1176 }
   1177 
   1178 void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
   1179 #ifdef JS_64BIT
   1180  // This LIR instruction is only used if the input can be negative.
   1181  MOZ_ASSERT(lir->mir()->canBeNegative());
   1182 
   1183  Register output = ToRegister(lir->output());
   1184  const LAllocation* input = lir->input();
   1185  if (input->isGeneralReg()) {
   1186    masm.move32SignExtendToPtr(ToRegister(input), output);
   1187  } else {
   1188    masm.load32SignExtendToPtr(ToAddress(input), output);
   1189  }
   1190 #else
   1191  MOZ_CRASH("Not used on 32-bit platforms");
   1192 #endif
   1193 }
   1194 
   1195 void CodeGenerator::visitNonNegativeIntPtrToInt32(
   1196    LNonNegativeIntPtrToInt32* lir) {
   1197 #ifdef JS_64BIT
   1198  Register output = ToRegister(lir->output());
   1199  MOZ_ASSERT(ToRegister(lir->input()) == output);
   1200 
   1201  Label bail;
   1202  masm.guardNonNegativeIntPtrToInt32(output, &bail);
   1203  bailoutFrom(&bail, lir->snapshot());
   1204 #else
   1205  MOZ_CRASH("Not used on 32-bit platforms");
   1206 #endif
   1207 }
   1208 
   1209 void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
   1210  Register input = ToRegister(lir->input());
   1211  FloatRegister output = ToFloatRegister(lir->output());
   1212  masm.convertIntPtrToDouble(input, output);
   1213 }
   1214 
   1215 void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
   1216  Register output = ToRegister(lir->output());
   1217  MOZ_ASSERT(ToRegister(lir->input()) == output);
   1218 
   1219  uint32_t byteSize = lir->mir()->byteSize();
   1220 
   1221 #ifdef DEBUG
   1222  Label ok;
   1223  masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
   1224  masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
   1225  masm.bind(&ok);
   1226 #endif
   1227 
   1228  Label bail;
   1229  masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
   1230  bailoutFrom(&bail, lir->snapshot());
   1231 }
   1232 
// Out-of-line slow path deciding whether |objreg| emulates undefined. Saves
// the volatile registers, calls into C++, then branches to one of the two
// labels based on the boolean result (materialized in |scratch|).
void CodeGenerator::emitOOLTestObject(Register objreg,
                                      Label* ifEmulatesUndefined,
                                      Label* ifDoesntEmulateUndefined,
                                      Register scratch) {
  saveVolatile(scratch);
#if defined(DEBUG) || defined(FUZZING)
  // In DEBUG/FUZZING builds, also pass the current value of the
  // HasSeenObjectEmulateUndefined fuse so the callee can cross-check it.
  masm.loadRuntimeFuse(
      RuntimeFuses::FuseIndex::HasSeenObjectEmulateUndefinedFuse, scratch);
  using Fn = bool (*)(JSObject* obj, size_t fuseValue);
  masm.setupAlignedABICall();
  masm.passABIArg(objreg);
  masm.passABIArg(scratch);
  masm.callWithABI<Fn, js::EmulatesUndefinedCheckFuse>();
#else
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(objreg);
  masm.callWithABI<Fn, js::EmulatesUndefined>();
#endif
  masm.storeCallPointerResult(scratch);
  restoreVolatile(scratch);

  masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
  masm.jump(ifDoesntEmulateUndefined);
}
   1258 
   1259 // Base out-of-line code generator for all tests of the truthiness of an
   1260 // object, where the object might not be truthy.  (Recall that per spec all
   1261 // objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
   1262 // flag to permit objects to look like |undefined| in certain contexts,
   1263 // including in object truthiness testing.)  We check truthiness inline except
   1264 // when we're testing it on a proxy, in which case out-of-line code will call
   1265 // EmulatesUndefined for a conclusive answer.
   1266 class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
   1267  Register objreg_;
   1268  Register scratch_;
   1269 
   1270  Label* ifEmulatesUndefined_;
   1271  Label* ifDoesntEmulateUndefined_;
   1272 
   1273 #ifdef DEBUG
   1274  bool initialized() { return ifEmulatesUndefined_ != nullptr; }
   1275 #endif
   1276 
   1277 public:
   1278  OutOfLineTestObject()
   1279      : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}
   1280 
   1281  void accept(CodeGenerator* codegen) final {
   1282    MOZ_ASSERT(initialized());
   1283    codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
   1284                               ifDoesntEmulateUndefined_, scratch_);
   1285  }
   1286 
   1287  // Specify the register where the object to be tested is found, labels to
   1288  // jump to if the object is truthy or falsy, and a scratch register for
   1289  // use in the out-of-line path.
   1290  void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
   1291                          Label* ifDoesntEmulateUndefined, Register scratch) {
   1292    MOZ_ASSERT(!initialized());
   1293    MOZ_ASSERT(ifEmulatesUndefined);
   1294    objreg_ = objreg;
   1295    scratch_ = scratch;
   1296    ifEmulatesUndefined_ = ifEmulatesUndefined;
   1297    ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
   1298  }
   1299 };
   1300 
   1301 // A subclass of OutOfLineTestObject containing two extra labels, for use when
   1302 // the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
   1303 // code.  The user should bind these labels in inline code, and specify them as
   1304 // targets via setInputAndTargets, as appropriate.
   1305 class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
   1306  Label label1_;
   1307  Label label2_;
   1308 
   1309 public:
   1310  OutOfLineTestObjectWithLabels() = default;
   1311 
   1312  Label* label1() { return &label1_; }
   1313  Label* label2() { return &label2_; }
   1314 };
   1315 
// Shared emitter for the object-emulates-undefined test: does the inline
// class-flag fast path and routes proxies to |ool|. Deliberately neither
// binds nor jumps to |ifDoesntEmulateUndefined| — callers choose between
// fall-through (branchTestObjectEmulatesUndefined) and an explicit jump
// (testObjectEmulatesUndefined).
void CodeGenerator::testObjectEmulatesUndefinedKernel(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                          scratch);

  // Perform a fast-path check of the object's class flags if the object's
  // not a proxy.  Let out-of-line code handle the slow cases that require
  // saving registers, making a function call, and restoring registers.
  masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
                                       ifEmulatesUndefined);
}
   1329 
// Emit the emulates-undefined test and bind |ifDoesntEmulateUndefined| to the
// fall-through path, for callers that place the doesn't-emulate-undefined
// code immediately after this test.
void CodeGenerator::branchTestObjectEmulatesUndefined(
    Register objreg, Label* ifEmulatesUndefined,
    Label* ifDoesntEmulateUndefined, Register scratch,
    OutOfLineTestObject* ool) {
  MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
             "ifDoesntEmulateUndefined will be bound to the fallthrough path");

  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.bind(ifDoesntEmulateUndefined);
}
   1341 
// Emit the emulates-undefined test with an explicit jump to
// |ifDoesntEmulateUndefined|, for callers whose labels are bound elsewhere.
void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
                                                Label* ifEmulatesUndefined,
                                                Label* ifDoesntEmulateUndefined,
                                                Register scratch,
                                                OutOfLineTestObject* ool) {
  testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
                                    ifDoesntEmulateUndefined, scratch, ool);
  masm.jump(ifDoesntEmulateUndefined);
}
   1351 
// Emit the truthiness test of |value| for a single candidate JSValueType
// |type|, branching to |ifTruthy|/|ifFalsy| when the value has that type and
// otherwise falling through to the caller's next type test. When
// |skipTypeTest| is true the caller guarantees |type| is the only remaining
// possibility, so no tag check is emitted (and truthy falls through instead
// of branching). Objects that might emulate undefined are routed through
// |ool|.
void CodeGenerator::testValueTruthyForType(
    JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
    Register tempToUnbox, Register temp, FloatRegister floatTemp,
    Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
    bool skipTypeTest) {
#ifdef DEBUG
  if (skipTypeTest) {
    Label expected;
    masm.branchTestType(Assembler::Equal, tag, type, &expected);
    masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
    masm.bind(&expected);
  }
#endif

  // Handle irregular types first.
  switch (type) {
    case JSVAL_TYPE_UNDEFINED:
    case JSVAL_TYPE_NULL:
      // Undefined and null are falsy.
      if (!skipTypeTest) {
        masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
      } else {
        masm.jump(ifFalsy);
      }
      return;
    case JSVAL_TYPE_SYMBOL:
      // Symbols are truthy.
      if (!skipTypeTest) {
        masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
      } else {
        masm.jump(ifTruthy);
      }
      return;
    case JSVAL_TYPE_OBJECT: {
      Label notObject;
      if (!skipTypeTest) {
        masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
      }
      // Release the tag scratch so the payload can be unboxed.
      ScratchTagScopeRelease _(&tag);
      Register objreg = masm.extractObject(value, tempToUnbox);
      testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
      masm.bind(&notObject);
      return;
    }
    default:
      break;
  }

  // Check the type of the value (unless this is the last possible type).
  Label differentType;
  if (!skipTypeTest) {
    masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
  }

  // Branch if the value is falsy.
  ScratchTagScopeRelease _(&tag);
  switch (type) {
    case JSVAL_TYPE_BOOLEAN: {
      masm.branchTestBooleanTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_INT32: {
      masm.branchTestInt32Truthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_STRING: {
      masm.branchTestStringTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_BIGINT: {
      masm.branchTestBigIntTruthy(false, value, ifFalsy);
      break;
    }
    case JSVAL_TYPE_DOUBLE: {
      masm.unboxDouble(value, floatTemp);
      masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
      break;
    }
    default:
      MOZ_CRASH("Unexpected value type");
  }

  // If we reach this point, the value is truthy.  We fall through for
  // truthy on the last test; otherwise, branch.
  if (!skipTypeTest) {
    masm.jump(ifTruthy);
  }

  masm.bind(&differentType);
}
   1442 
   1443 void CodeGenerator::testValueTruthy(const ValueOperand& value,
   1444                                    Register tempToUnbox, Register temp,
   1445                                    FloatRegister floatTemp,
   1446                                    const TypeDataList& observedTypes,
   1447                                    Label* ifTruthy, Label* ifFalsy,
   1448                                    OutOfLineTestObject* ool) {
   1449  ScratchTagScope tag(masm, value);
   1450  masm.splitTagForTest(value, tag);
   1451 
   1452  const std::initializer_list<JSValueType> defaultOrder = {
   1453      JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN,
   1454      JSVAL_TYPE_INT32,     JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
   1455      JSVAL_TYPE_DOUBLE,    JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
   1456 
   1457  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
   1458 
   1459  // Generate tests for previously observed types first.
   1460  // The TypeDataList is sorted by descending frequency.
   1461  for (auto& observed : observedTypes) {
   1462    JSValueType type = observed.type();
   1463    remaining -= type;
   1464 
   1465    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
   1466                           ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
   1467  }
   1468 
   1469  // Generate tests for remaining types.
   1470  for (auto type : defaultOrder) {
   1471    if (!remaining.contains(type)) {
   1472      continue;
   1473    }
   1474    remaining -= type;
   1475 
   1476    // We don't need a type test for the last possible type.
   1477    bool skipTypeTest = remaining.isEmpty();
   1478    testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
   1479                           ifTruthy, ifFalsy, ool, skipTypeTest);
   1480  }
   1481  MOZ_ASSERT(remaining.isEmpty());
   1482 
   1483  // We fall through if the final test is truthy.
   1484 }
   1485 
   1486 void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
   1487  Register input = ToRegister(test->input());
   1488  MBasicBlock* ifTrue = test->ifTrue();
   1489  MBasicBlock* ifFalse = test->ifFalse();
   1490 
   1491  if (isNextBlock(ifFalse->lir())) {
   1492    masm.branchTest32(Assembler::NonZero, input, input,
   1493                      getJumpLabelForBranch(ifTrue));
   1494  } else {
   1495    masm.branchTest32(Assembler::Zero, input, input,
   1496                      getJumpLabelForBranch(ifFalse));
   1497    jumpToBlock(ifTrue);
   1498  }
   1499 }
   1500 
   1501 void CodeGenerator::visitTestIPtrAndBranch(LTestIPtrAndBranch* test) {
   1502  Register input = ToRegister(test->input());
   1503  MBasicBlock* ifTrue = test->ifTrue();
   1504  MBasicBlock* ifFalse = test->ifFalse();
   1505 
   1506  if (isNextBlock(ifFalse->lir())) {
   1507    masm.branchTestPtr(Assembler::NonZero, input, input,
   1508                       getJumpLabelForBranch(ifTrue));
   1509  } else {
   1510    masm.branchTestPtr(Assembler::Zero, input, input,
   1511                       getJumpLabelForBranch(ifFalse));
   1512    jumpToBlock(ifTrue);
   1513  }
   1514 }
   1515 
   1516 void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* test) {
   1517  Register64 input = ToRegister64(test->input());
   1518  MBasicBlock* ifTrue = test->ifTrue();
   1519  MBasicBlock* ifFalse = test->ifFalse();
   1520 
   1521  if (isNextBlock(ifFalse->lir())) {
   1522    masm.branchTest64(Assembler::NonZero, input, input,
   1523                      getJumpLabelForBranch(ifTrue));
   1524  } else if (isNextBlock(ifTrue->lir())) {
   1525    masm.branchTest64(Assembler::Zero, input, input,
   1526                      getJumpLabelForBranch(ifFalse));
   1527  } else {
   1528    masm.branchTest64(Assembler::NonZero, input, input,
   1529                      getJumpLabelForBranch(ifTrue),
   1530                      getJumpLabelForBranch(ifFalse));
   1531  }
   1532 }
   1533 
   1534 void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
   1535  Register input = ToRegister(lir->input());
   1536  MBasicBlock* ifTrue = lir->ifTrue();
   1537  MBasicBlock* ifFalse = lir->ifFalse();
   1538 
   1539  if (isNextBlock(ifFalse->lir())) {
   1540    masm.branchIfBigIntIsNonZero(input, getJumpLabelForBranch(ifTrue));
   1541  } else {
   1542    masm.branchIfBigIntIsZero(input, getJumpLabelForBranch(ifFalse));
   1543    jumpToBlock(ifTrue);
   1544  }
   1545 }
   1546 
   1547 static Assembler::Condition ReverseCondition(Assembler::Condition condition) {
   1548  switch (condition) {
   1549    case Assembler::Equal:
   1550    case Assembler::NotEqual:
   1551      return condition;
   1552    case Assembler::Above:
   1553      return Assembler::Below;
   1554    case Assembler::AboveOrEqual:
   1555      return Assembler::BelowOrEqual;
   1556    case Assembler::Below:
   1557      return Assembler::Above;
   1558    case Assembler::BelowOrEqual:
   1559      return Assembler::AboveOrEqual;
   1560    case Assembler::GreaterThan:
   1561      return Assembler::LessThan;
   1562    case Assembler::GreaterThanOrEqual:
   1563      return Assembler::LessThanOrEqual;
   1564    case Assembler::LessThan:
   1565      return Assembler::GreaterThan;
   1566    case Assembler::LessThanOrEqual:
   1567      return Assembler::GreaterThanOrEqual;
   1568    default:
   1569      break;
   1570  }
   1571  MOZ_CRASH("unhandled condition");
   1572 }
   1573 
   1574 void CodeGenerator::visitCompare(LCompare* comp) {
   1575  MCompare::CompareType compareType = comp->mir()->compareType();
   1576  Assembler::Condition cond = JSOpToCondition(compareType, comp->jsop());
   1577  Register left = ToRegister(comp->left());
   1578  const LAllocation* right = comp->right();
   1579  Register output = ToRegister(comp->output());
   1580 
   1581  if (compareType == MCompare::Compare_Object ||
   1582      compareType == MCompare::Compare_Symbol ||
   1583      compareType == MCompare::Compare_IntPtr ||
   1584      compareType == MCompare::Compare_UIntPtr ||
   1585      compareType == MCompare::Compare_WasmAnyRef) {
   1586    if (right->isConstant()) {
   1587      MOZ_ASSERT(compareType == MCompare::Compare_IntPtr ||
   1588                 compareType == MCompare::Compare_UIntPtr);
   1589      masm.cmpPtrSet(cond, left, ImmWord(ToInt32(right)), output);
   1590    } else if (right->isGeneralReg()) {
   1591      masm.cmpPtrSet(cond, left, ToRegister(right), output);
   1592    } else {
   1593      masm.cmpPtrSet(ReverseCondition(cond), ToAddress(right), left, output);
   1594    }
   1595    return;
   1596  }
   1597 
   1598  MOZ_ASSERT(compareType == MCompare::Compare_Int32 ||
   1599             compareType == MCompare::Compare_UInt32);
   1600 
   1601  if (right->isConstant()) {
   1602    masm.cmp32Set(cond, left, Imm32(ToInt32(right)), output);
   1603  } else if (right->isGeneralReg()) {
   1604    masm.cmp32Set(cond, left, ToRegister(right), output);
   1605  } else {
   1606    masm.cmp32Set(ReverseCondition(cond), ToAddress(right), left, output);
   1607  }
   1608 }
   1609 
// Strict (in)equality of a boxed Value against an int32 constant. The value
// matches if it is the boxed Int32Value, or a boxed DoubleValue with the same
// numeric value (including -0.0 when the constant is 0). All comparisons are
// done on the raw 64-bit boxed bits, so no unboxing or type test is needed.
void CodeGenerator::visitStrictConstantCompareInt32(
    LStrictConstantCompareInt32* comp) {
  ValueOperand value = ToValue(comp->value());
  int32_t constantVal = comp->mir()->constant();
  JSOp op = comp->mir()->jsop();
  Register temp = ToRegister(comp->temp0());
  Register output = ToRegister(comp->output());

  // Compare against the int32-boxed representation of the constant...
  masm.cmp64Set(JSOpToCondition(op, false), value.toRegister64(),
                Imm64(Int32Value(constantVal).asRawBits()), output);
  // ...and against the double-boxed representation of the same number.
  masm.cmp64Set(JSOpToCondition(op, false), value.toRegister64(),
                Imm64(DoubleValue(constantVal).asRawBits()), temp);

  // For StrictEq the value matches if either representation matched; for
  // StrictNe it is unequal only if both comparisons reported "not equal".
  if (op == JSOp::StrictEq) {
    masm.or32(temp, output);
  } else {
    masm.and32(temp, output);
  }

  if (constantVal == 0) {
    // Zero has a second double-boxed representation: -0.0 has different raw
    // bits from +0.0 but is strictly equal to 0. Fold in a third comparison.
    masm.cmp64Set(JSOpToCondition(op, false), value.toRegister64(),
                  Imm64(DoubleValue(-0.0).asRawBits()), temp);

    if (op == JSOp::StrictEq) {
      masm.or32(temp, output);
    } else {
      masm.and32(temp, output);
    }
  }
}
   1640 
// Branch version of StrictConstantCompareInt32: strict (in)equality of a
// boxed Value against an int32 constant, compared on the raw boxed bits. The
// value matches if it is the boxed Int32Value or a boxed DoubleValue of the
// same number (including -0.0 when the constant is 0).
void CodeGenerator::visitStrictConstantCompareInt32AndBranch(
    LStrictConstantCompareInt32AndBranch* comp) {
  ValueOperand value = ToValue(comp->value());
  int32_t constantVal = comp->cmpMir()->constant();
  JSOp op = comp->cmpMir()->jsop();
  Assembler::Condition cond = JSOpToCondition(op, false);

  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  Label* trueLabel = getJumpLabelForBranch(ifTrue);
  Label* falseLabel = getJumpLabelForBranch(ifFalse);

  // Target for the early "raw bits matched exactly" branches below. Note that
  // this is captured *before* the fall-through rewriting of trueLabel and
  // falseLabel, so it always names the actual successor block.
  Label* onEqual = op == JSOp::StrictEq ? trueLabel : falseLabel;

  // If the next block is the true case, invert the condition to fall through.
  if (isNextBlock(ifTrue->lir())) {
    cond = Assembler::InvertCondition(cond);
    trueLabel = falseLabel;
    falseLabel = nullptr;
  } else if (isNextBlock(ifFalse->lir())) {
    falseLabel = nullptr;
  }

  // Matches the int32-boxed representation.
  masm.branch64(Assembler::Equal, value.toRegister64(),
                Imm64(Int32Value(constantVal).asRawBits()), onEqual);
  if (constantVal == 0) {
    // Zero has two double-boxed representations (+0.0 and -0.0) with distinct
    // raw bits; check the first here and let the final branch decide on -0.0.
    masm.branch64(Assembler::Equal, value.toRegister64(),
                  Imm64(DoubleValue(0.0).asRawBits()), onEqual);
    masm.branch64(cond, value.toRegister64(),
                  Imm64(DoubleValue(-0.0).asRawBits()), trueLabel, falseLabel);
  } else {
    // The comparison against the double-boxed form decides the branch.
    masm.branch64(cond, value.toRegister64(),
                  Imm64(DoubleValue(constantVal).asRawBits()), trueLabel,
                  falseLabel);
  }
}
   1678 
   1679 void CodeGenerator::visitStrictConstantCompareBoolean(
   1680    LStrictConstantCompareBoolean* comp) {
   1681  ValueOperand value = ToValue(comp->value());
   1682  bool constantVal = comp->mir()->constant();
   1683  JSOp op = comp->mir()->jsop();
   1684  Register output = ToRegister(comp->output());
   1685 
   1686  masm.cmp64Set(JSOpToCondition(op, false), value.toRegister64(),
   1687                Imm64(BooleanValue(constantVal).asRawBits()), output);
   1688 }
   1689 
   1690 void CodeGenerator::visitStrictConstantCompareBooleanAndBranch(
   1691    LStrictConstantCompareBooleanAndBranch* comp) {
   1692  ValueOperand value = ToValue(comp->value());
   1693  bool constantVal = comp->cmpMir()->constant();
   1694  Assembler::Condition cond = JSOpToCondition(comp->cmpMir()->jsop(), false);
   1695 
   1696  MBasicBlock* ifTrue = comp->ifTrue();
   1697  MBasicBlock* ifFalse = comp->ifFalse();
   1698 
   1699  Label* trueLabel = getJumpLabelForBranch(ifTrue);
   1700  Label* falseLabel = getJumpLabelForBranch(ifFalse);
   1701 
   1702  // If the next block is the true case, invert the condition to fall through.
   1703  if (isNextBlock(ifTrue->lir())) {
   1704    cond = Assembler::InvertCondition(cond);
   1705    trueLabel = falseLabel;
   1706    falseLabel = nullptr;
   1707  } else if (isNextBlock(ifFalse->lir())) {
   1708    falseLabel = nullptr;
   1709  }
   1710 
   1711  masm.branch64(cond, value.toRegister64(),
   1712                Imm64(BooleanValue(constantVal).asRawBits()), trueLabel,
   1713                falseLabel);
   1714 }
   1715 
   1716 void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
   1717  MCompare::CompareType compareType = comp->cmpMir()->compareType();
   1718  Assembler::Condition cond = JSOpToCondition(compareType, comp->jsop());
   1719  Register left = ToRegister(comp->left());
   1720  const LAllocation* right = comp->right();
   1721 
   1722  MBasicBlock* ifTrue = comp->ifTrue();
   1723  MBasicBlock* ifFalse = comp->ifFalse();
   1724 
   1725  // If the next block is the true case, invert the condition to fall through.
   1726  Label* label;
   1727  if (isNextBlock(ifTrue->lir())) {
   1728    cond = Assembler::InvertCondition(cond);
   1729    label = getJumpLabelForBranch(ifFalse);
   1730  } else {
   1731    label = getJumpLabelForBranch(ifTrue);
   1732  }
   1733 
   1734  if (compareType == MCompare::Compare_Object ||
   1735      compareType == MCompare::Compare_Symbol ||
   1736      compareType == MCompare::Compare_IntPtr ||
   1737      compareType == MCompare::Compare_UIntPtr ||
   1738      compareType == MCompare::Compare_WasmAnyRef) {
   1739    if (right->isConstant()) {
   1740      MOZ_ASSERT(compareType == MCompare::Compare_IntPtr ||
   1741                 compareType == MCompare::Compare_UIntPtr);
   1742      masm.branchPtr(cond, left, ImmWord(ToInt32(right)), label);
   1743    } else if (right->isGeneralReg()) {
   1744      masm.branchPtr(cond, left, ToRegister(right), label);
   1745    } else {
   1746      masm.branchPtr(ReverseCondition(cond), ToAddress(right), left, label);
   1747    }
   1748  } else {
   1749    MOZ_ASSERT(compareType == MCompare::Compare_Int32 ||
   1750               compareType == MCompare::Compare_UInt32);
   1751 
   1752    if (right->isConstant()) {
   1753      masm.branch32(cond, left, Imm32(ToInt32(right)), label);
   1754    } else if (right->isGeneralReg()) {
   1755      masm.branch32(cond, left, ToRegister(right), label);
   1756    } else {
   1757      masm.branch32(ReverseCondition(cond), ToAddress(right), left, label);
   1758    }
   1759  }
   1760 
   1761  if (!isNextBlock(ifTrue->lir())) {
   1762    jumpToBlock(ifFalse);
   1763  }
   1764 }
   1765 
   1766 void CodeGenerator::visitCompareI64(LCompareI64* lir) {
   1767  MCompare::CompareType compareType = lir->mir()->compareType();
   1768  MOZ_ASSERT(compareType == MCompare::Compare_Int64 ||
   1769             compareType == MCompare::Compare_UInt64);
   1770  bool isSigned = compareType == MCompare::Compare_Int64;
   1771  Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
   1772  Register64 left = ToRegister64(lir->left());
   1773  LInt64Allocation right = lir->right();
   1774  Register output = ToRegister(lir->output());
   1775 
   1776  if (IsConstant(right)) {
   1777    masm.cmp64Set(cond, left, Imm64(ToInt64(right)), output);
   1778  } else if (IsRegister64(right)) {
   1779    masm.cmp64Set(cond, left, ToRegister64(right), output);
   1780  } else {
   1781    masm.cmp64Set(ReverseCondition(cond), ToAddress(right), left, output);
   1782  }
   1783 }
   1784 
   1785 void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
   1786  MCompare::CompareType compareType = lir->cmpMir()->compareType();
   1787  MOZ_ASSERT(compareType == MCompare::Compare_Int64 ||
   1788             compareType == MCompare::Compare_UInt64);
   1789  bool isSigned = compareType == MCompare::Compare_Int64;
   1790  Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
   1791  Register64 left = ToRegister64(lir->left());
   1792  LInt64Allocation right = lir->right();
   1793 
   1794  MBasicBlock* ifTrue = lir->ifTrue();
   1795  MBasicBlock* ifFalse = lir->ifFalse();
   1796 
   1797  Label* trueLabel = getJumpLabelForBranch(ifTrue);
   1798  Label* falseLabel = getJumpLabelForBranch(ifFalse);
   1799 
   1800  // If the next block is the true case, invert the condition to fall through.
   1801  if (isNextBlock(ifTrue->lir())) {
   1802    cond = Assembler::InvertCondition(cond);
   1803    trueLabel = falseLabel;
   1804    falseLabel = nullptr;
   1805  } else if (isNextBlock(ifFalse->lir())) {
   1806    falseLabel = nullptr;
   1807  }
   1808 
   1809  if (IsConstant(right)) {
   1810    masm.branch64(cond, left, Imm64(ToInt64(right)), trueLabel, falseLabel);
   1811  } else if (IsRegister64(right)) {
   1812    masm.branch64(cond, left, ToRegister64(right), trueLabel, falseLabel);
   1813  } else {
   1814    masm.branch64(ReverseCondition(cond), ToAddress(right), left, trueLabel,
   1815                  falseLabel);
   1816  }
   1817 }
   1818 
   1819 void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
   1820  Assembler::Condition cond = baab->cond();
   1821  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
   1822 
   1823  Register left = ToRegister(baab->left());
   1824  const LAllocation* right = baab->right();
   1825 
   1826  MBasicBlock* ifTrue = baab->ifTrue();
   1827  MBasicBlock* ifFalse = baab->ifFalse();
   1828 
   1829  // If the next block is the true case, invert the condition to fall through.
   1830  Label* label;
   1831  if (isNextBlock(ifTrue->lir())) {
   1832    cond = Assembler::InvertCondition(cond);
   1833    label = getJumpLabelForBranch(ifFalse);
   1834  } else {
   1835    label = getJumpLabelForBranch(ifTrue);
   1836  }
   1837 
   1838  if (right->isConstant()) {
   1839    masm.branchTest32(cond, left, Imm32(ToInt32(right)), label);
   1840  } else {
   1841    masm.branchTest32(cond, left, ToRegister(right), label);
   1842  }
   1843 
   1844  if (!isNextBlock(ifTrue->lir())) {
   1845    jumpToBlock(ifFalse);
   1846  }
   1847 }
   1848 
   1849 void CodeGenerator::visitBitAnd64AndBranch(LBitAnd64AndBranch* baab) {
   1850  Assembler::Condition cond = baab->cond();
   1851  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
   1852 
   1853  Register64 left = ToRegister64(baab->left());
   1854  LInt64Allocation right = baab->right();
   1855 
   1856  MBasicBlock* ifTrue = baab->ifTrue();
   1857  MBasicBlock* ifFalse = baab->ifFalse();
   1858 
   1859  Label* trueLabel = getJumpLabelForBranch(ifTrue);
   1860  Label* falseLabel = getJumpLabelForBranch(ifFalse);
   1861 
   1862  // If the next block is the true case, invert the condition to fall through.
   1863  if (isNextBlock(ifTrue->lir())) {
   1864    cond = Assembler::InvertCondition(cond);
   1865    trueLabel = falseLabel;
   1866    falseLabel = nullptr;
   1867  } else if (isNextBlock(ifFalse->lir())) {
   1868    falseLabel = nullptr;
   1869  }
   1870 
   1871  if (IsConstant(right)) {
   1872    masm.branchTest64(cond, left, Imm64(ToInt64(right)), trueLabel, falseLabel);
   1873  } else {
   1874    masm.branchTest64(cond, left, ToRegister64(right), trueLabel, falseLabel);
   1875  }
   1876 }
   1877 
// Debug/fuzzing-only runtime check, emitted by callers that rely on the
// "no object emulates undefined" fuse being intact: crash if |input| does
// have the emulates-undefined behavior after all. |temp| is clobbered.
// Compiles to nothing in non-DEBUG, non-FUZZING builds.
void CodeGenerator::assertObjectDoesNotEmulateUndefined(
    Register input, Register temp, const MInstruction* mir) {
#if defined(DEBUG) || defined(FUZZING)
  // Validate that the object indeed doesn't have the emulates undefined flag.
  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  addOutOfLineCode(ool, mir);

  Label* doesNotEmulateUndefined = ool->label1();
  Label* emulatesUndefined = ool->label2();

  testObjectEmulatesUndefined(input, emulatesUndefined, doesNotEmulateUndefined,
                              temp, ool);
  masm.bind(emulatesUndefined);
  masm.assumeUnreachable(
      "Found an object emulating undefined while the fuse is intact");
  masm.bind(doesNotEmulateUndefined);
#endif
}
   1896 
// Branch on the truthiness of an object. Objects are truthy unless they have
// the emulates-undefined behavior, so when the fuse guaranteeing that no
// object emulates undefined is intact we can jump straight to the truthy
// block; otherwise a runtime class-flag test (with out-of-line slow path) is
// required.
void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
  Register input = ToRegister(lir->input());
  Register temp = ToRegister(lir->temp0());

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (intact) {
    // Debug-only sanity check that the fuse's guarantee really holds.
    assertObjectDoesNotEmulateUndefined(input, temp, lir->mir());
    // Bug 1874905: It would be fantastic if this could be optimized out
    masm.jump(truthy);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->mir());

    // Branches to |falsy| when the object emulates undefined, |truthy|
    // otherwise; |temp| is clobbered.
    testObjectEmulatesUndefined(input, falsy, truthy, temp, ool);
  }
}
   1915 
   1916 void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
   1917  auto* ool = new (alloc()) OutOfLineTestObject();
   1918  addOutOfLineCode(ool, lir->mir());
   1919 
   1920  Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
   1921  Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
   1922 
   1923  ValueOperand input = ToValue(lir->input());
   1924  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
   1925  Register temp = ToRegister(lir->temp2());
   1926  FloatRegister floatTemp = ToFloatRegister(lir->temp0());
   1927  const TypeDataList& observedTypes = lir->mir()->observedTypes();
   1928 
   1929  testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
   1930                  falsy, ool);
   1931  masm.jump(truthy);
   1932 }
   1933 
   1934 void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
   1935  Register input = ToRegister(lir->input());
   1936  Register output = ToRegister(lir->output());
   1937  const JSAtomState& names = gen->runtime->names();
   1938  Label true_, done;
   1939 
   1940  masm.branchTest32(Assembler::NonZero, input, input, &true_);
   1941  masm.movePtr(ImmGCPtr(names.false_), output);
   1942  masm.jump(&done);
   1943 
   1944  masm.bind(&true_);
   1945  masm.movePtr(ImmGCPtr(names.true_), output);
   1946 
   1947  masm.bind(&done);
   1948 }
   1949 
   1950 void CodeGenerator::visitIntToString(LIntToString* lir) {
   1951  Register input = ToRegister(lir->input());
   1952  Register output = ToRegister(lir->output());
   1953 
   1954  using Fn = JSLinearString* (*)(JSContext*, int);
   1955  OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
   1956      lir, ArgList(input), StoreRegisterTo(output));
   1957 
   1958  masm.lookupStaticIntString(input, output, gen->runtime->staticStrings(),
   1959                             ool->entry());
   1960 
   1961  masm.bind(ool->rejoin());
   1962 }
   1963 
   1964 void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
   1965  FloatRegister input = ToFloatRegister(lir->input());
   1966  Register temp = ToRegister(lir->temp0());
   1967  Register output = ToRegister(lir->output());
   1968 
   1969  using Fn = JSString* (*)(JSContext*, double);
   1970  OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
   1971      lir, ArgList(input), StoreRegisterTo(output));
   1972 
   1973  // Try double to integer conversion and run integer to string code.
   1974  masm.convertDoubleToInt32(input, temp, ool->entry(), false);
   1975  masm.lookupStaticIntString(temp, output, gen->runtime->staticStrings(),
   1976                             ool->entry());
   1977 
   1978  masm.bind(ool->rejoin());
   1979 }
   1980 
// Convert an arbitrary boxed Value to a string by dispatching on its type
// tag. Primitive cases are handled inline where a fast path exists; all
// remaining cases (doubles, objects/symbols with side effects, BigInts)
// fall back to the ToStringSlow VM call via the out-of-line path.
void CodeGenerator::visitValueToString(LValueToString* lir) {
  ValueOperand input = ToValue(lir->input());
  Register output = ToRegister(lir->output());

  using Fn = JSString* (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
      lir, ArgList(input), StoreRegisterTo(output));

  Label done;
  // Note: the tag is extracted into |output|, which is safe because every
  // case below overwrites |output| before jumping to |done|.
  Register tag = masm.extractTag(input, output);
  const JSAtomState& names = gen->runtime->names();

  // String: already a string, just unbox.
  {
    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    masm.unboxString(input, output);
    masm.jump(&done);
    masm.bind(&notString);
  }

  // Integer: use the static-strings cache, VM call on a cache miss.
  {
    Label notInteger;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
    Register unboxed = ToTempUnboxRegister(lir->temp0());
    unboxed = masm.extractInt32(input, unboxed);
    masm.lookupStaticIntString(unboxed, output, gen->runtime->staticStrings(),
                               ool->entry());
    masm.jump(&done);
    masm.bind(&notInteger);
  }

  // Double
  {
    // Note: no fastpath. Need two extra registers and can only convert doubles
    // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
    masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
  }

  // Undefined: the interned "undefined" atom.
  {
    Label notUndefined;
    masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
    masm.movePtr(ImmGCPtr(names.undefined), output);
    masm.jump(&done);
    masm.bind(&notUndefined);
  }

  // Null: the interned "null" atom.
  {
    Label notNull;
    masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
    masm.movePtr(ImmGCPtr(names.null), output);
    masm.jump(&done);
    masm.bind(&notNull);
  }

  // Boolean: the interned "true" / "false" atom.
  {
    Label notBoolean, true_;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    masm.branchTestBooleanTruthy(true, input, &true_);
    masm.movePtr(ImmGCPtr(names.false_), output);
    masm.jump(&done);
    masm.bind(&true_);
    masm.movePtr(ImmGCPtr(names.true_), output);
    masm.jump(&done);
    masm.bind(&notBoolean);
  }

  // Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
  if (lir->mir()->mightHaveSideEffects()) {
    // Object: VM call (may invoke toString/valueOf), or bail out when side
    // effects are not supported at this site.
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestObject(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestObject(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }

    // Symbol: same strategy as Object (the VM call throws for symbols).
    if (lir->mir()->supportSideEffects()) {
      masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
    } else {
      // Bail.
      MOZ_ASSERT(lir->mir()->needsSnapshot());
      Label bail;
      masm.branchTestSymbol(Assembler::Equal, tag, &bail);
      bailoutFrom(&bail, lir->snapshot());
    }
  }

  // BigInt
  {
    // No fastpath currently implemented.
    masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
  }

  masm.assumeUnreachable("Unexpected type for LValueToString.");

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
   2088 
   2089 using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);
   2090 
   2091 static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
   2092                                    size_t offset, Register buffer,
   2093                                    LiveGeneralRegisterSet& liveVolatiles,
   2094                                    StoreBufferMutationFn fun) {
   2095  Label callVM;
   2096  Label exit;
   2097 
   2098  // Call into the VM to barrier the write. The only registers that need to
   2099  // be preserved are those in liveVolatiles, so once they are saved on the
   2100  // stack all volatile registers are available for use.
   2101  masm.bind(&callVM);
   2102  masm.PushRegsInMask(liveVolatiles);
   2103 
   2104  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
   2105  regs.takeUnchecked(buffer);
   2106  regs.takeUnchecked(holder);
   2107  Register addrReg = regs.takeAny();
   2108 
   2109  masm.computeEffectiveAddress(Address(holder, offset), addrReg);
   2110 
   2111  bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
   2112  if (needExtraReg) {
   2113    masm.push(holder);
   2114    masm.setupUnalignedABICall(holder);
   2115  } else {
   2116    masm.setupUnalignedABICall(regs.takeAny());
   2117  }
   2118  masm.passABIArg(buffer);
   2119  masm.passABIArg(addrReg);
   2120  masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun),
   2121                   ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
   2122 
   2123  if (needExtraReg) {
   2124    masm.pop(holder);
   2125  }
   2126  masm.PopRegsInMask(liveVolatiles);
   2127  masm.bind(&exit);
   2128 }
   2129 
// Emit a post-write barrier for storing the string |next| into the field at
// |holder + offset|, where |prev| is the string previously stored there.
// If |next| has a store buffer, the field's address is added to it (unless
// |prev|'s store buffer already contains it); if |next| has none but |prev|
// does, the now-stale address is removed.
// Warning: this function modifies prev and next.
static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
                                 size_t offset, Register prev, Register next,
                                 LiveGeneralRegisterSet& liveVolatiles) {
  Label exit;
  Label checkRemove, putCell;

  // if (next && (buffer = next->storeBuffer()))
  // but we never pass in nullptr for next.
  Register storebuffer = next;
  masm.loadStoreBuffer(next, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);

  // if (prev && prev->storeBuffer())
  // If the previous string was already buffered, the address is present and
  // nothing needs to be done.
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
  masm.loadStoreBuffer(prev, prev);
  masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);

  // buffer->putCell(cellp)
  masm.bind(&putCell);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::addCellAddressToStoreBuffer);
  masm.jump(&exit);

  // if (prev && (buffer = prev->storeBuffer()))
  // |next| has no store buffer: drop the entry recorded for |prev|, if any.
  masm.bind(&checkRemove);
  masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
  masm.loadStoreBuffer(prev, storebuffer);
  masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
  EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
                          JSString::removeCellAddressFromStoreBuffer);

  masm.bind(&exit);
}
   2164 
   2165 void CodeGenerator::visitRegExp(LRegExp* lir) {
   2166  Register output = ToRegister(lir->output());
   2167  Register temp = ToRegister(lir->temp0());
   2168  JSObject* source = lir->mir()->source();
   2169 
   2170  using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
   2171  OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
   2172      lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
   2173  if (lir->mir()->hasShared()) {
   2174    TemplateObject templateObject(source);
   2175    masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
   2176                        ool->entry());
   2177  } else {
   2178    masm.jump(ool->entry());
   2179  }
   2180  masm.bind(ool->rejoin());
   2181 }
   2182 
   2183 /*
   2184 * [SMDOC] RegExp stubs
   2185 *
   2186 * The RegExp stubs are a set of lazily generated per-zone stubs
   2187 * providing fast paths for regexp execution in baseline and Ion.
   2188 * In general, they are invoked from self-hosted code.
   2189 *
   2190 * There are four stubs:
   2191 * - RegExpMatcher: Given a regular expression, an input string,
   2192 *     and the current lastIndex, return the match result object.
   2193 * - RegExpExecMatch: The same as RegExpMatcher, but lastIndex is
   2194 *     not an argument. Instead, for sticky/global regexps, it is
   2195 *     loaded from the regexp, and the new value is stored back to
   2196 *     the regexp after execution. Otherwise, it is hardcoded to 0.
   2197 * - RegExpSearcher: Given a regular expression, an input string,
   2198 *     and the current lastIndex, return the index of the next match.
   2199 * - RegExpExecTest: Given a regular expression and an input string,
   2200 *     return a boolean indicating whether a match was found. This
   2201 *     stub has the same behaviour as RegExpExecMatch with respect to
   2202 *     lastIndex.
   2203 */
   2204 
   2205 // Offset of the InputOutputData relative to the frame pointer in regexp stubs.
   2206 // The InputOutputData is allocated by the caller, so it is placed above the
   2207 // frame pointer and return address on the stack.
static constexpr size_t RegExpInputOutputDataOffset = 2 * sizeof(void*);

// Offset from the frame pointer to the start of the MatchPairs pairs vector,
// which is laid out after the InputOutputData and the MatchPairs header.
static constexpr size_t RegExpPairsVectorStartOffset =
    RegExpInputOutputDataOffset + InputOutputDataSize + sizeof(MatchPairs);

// Frame-pointer-relative address of the MatchPairs pair-count field inside
// the caller-allocated regexp stack data.
static Address RegExpPairCountAddress() {
  return Address(FramePointer, RegExpInputOutputDataOffset +
                                   int32_t(InputOutputDataSize) +
                                   MatchPairs::offsetOfPairCount());
}
   2218 
// Update the RegExpStatics pointed to by |staticsReg| after a regexp match:
// record the input string, lastIndex, and the regexp's source/flags so the
// full statics can be recomputed lazily on demand. Emits the GC pre/post
// barriers required for overwriting the stored string pointers. Clobbers
// temp1 and temp2.
static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
                                Register input, Register lastIndex,
                                Register staticsReg, Register temp1,
                                Register temp2, gc::Heap initialStringHeap,
                                LiveGeneralRegisterSet& volatileRegs) {
  Address pendingInputAddress(staticsReg,
                              RegExpStatics::offsetOfPendingInput());
  Address matchesInputAddress(staticsReg,
                              RegExpStatics::offsetOfMatchesInput());
  Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
  Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());
  Label legacyFeaturesEnabled, done;
  if (JS::Prefs::experimental_legacy_regexp()) {
    // If this regexp has the legacy-features bit cleared, mark the statics
    // as invalidated and skip the lazy-statics update entirely.
    Address invalidatedAddress(staticsReg,
                               RegExpStatics::offsetOfInvalidated());

    masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                            RegExpObject::flagsSlot())),
                        temp1, JSVAL_TYPE_INT32);
    masm.branchTest32(Assembler::NonZero, temp1,
                      Imm32(RegExpObject::LegacyFeaturesEnabledBit),
                      &legacyFeaturesEnabled);
    masm.store8(Imm32(1), invalidatedAddress);
    masm.jump(&done);
    masm.bind(&legacyFeaturesEnabled);
  }

  // Pre-barrier the string pointers we are about to overwrite.
  masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
  masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);

  if (initialStringHeap == gc::Heap::Default) {
    // Writing into RegExpStatics tenured memory; must post-barrier.
    if (staticsReg.volatile_()) {
      volatileRegs.add(staticsReg);
    }

    masm.loadPtr(pendingInputAddress, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfPendingInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);

    masm.loadPtr(matchesInputAddress, temp1);
    masm.storePtr(input, matchesInputAddress);
    masm.movePtr(input, temp2);
    EmitPostWriteBarrierS(masm, staticsReg,
                          RegExpStatics::offsetOfMatchesInput(),
                          temp1 /* prev */, temp2 /* next */, volatileRegs);
  } else {
    // Strings are tenured, so no post-barrier is needed; assert that in
    // debug builds and store directly.
    masm.debugAssertGCThingIsTenured(input, temp1);
    masm.storePtr(input, pendingInputAddress);
    masm.storePtr(input, matchesInputAddress);
  }

  // Record lastIndex and flag the statics as pending lazy evaluation.
  masm.storePtr(lastIndex,
                Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
  masm.store32(
      Imm32(1),
      Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));

  // Copy the source string and flags out of the regexp's RegExpShared.
  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
                                          RegExpObject::SHARED_SLOT)),
                      temp1, JSVAL_TYPE_PRIVATE_GCTHING);
  masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
  masm.storePtr(temp2, lazySourceAddress);
  static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
  masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
  masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
  masm.bind(&done);
}
   2291 
// Prepare an InputOutputData and optional MatchPairs which space has been
// allocated for on the stack, and try to execute a RegExp on a string input.
// If the RegExp was successfully executed and matched the input, fallthrough.
// Otherwise, jump to notFound or failure.
//
// |regexp|, |input| and |lastIndex| hold the RegExpObject, the subject string
// and the start index; they are live throughout and are saved around any C++
// or regexp-code calls. |temp1|, |temp2| and |temp3| are clobbered.
// |initialStringHeap| is forwarded to UpdateRegExpStatics. |kind| identifies
// the stub being generated; the Searcher and ExecTest stubs do not inspect
// capture groups and leave the MatchPairs pair count at 1 (see below).
//
// NOTE(review): this function currently has no failure paths of its own and
// always returns true; the bool return exists so emit failures could be
// propagated by callers.
static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
                                    Register input, Register lastIndex,
                                    Register temp1, Register temp2,
                                    Register temp3, gc::Heap initialStringHeap,
                                    Label* notFound, Label* failure,
                                    JitZone::StubKind kind) {
  JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");

  using irregexp::InputOutputData;

  /*
   * [SMDOC] Stack layout for PrepareAndExecuteRegExp
   *
   * Before this function is called, the caller is responsible for
   * allocating enough stack space for the result data. This code
   * will fill in that data. This means that the match pairs will
   * not be freed when we return from a match stub, which allows us
   * to reuse them if we have to call into the VM to allocate results,
   * instead of executing the regexp from scratch. For consistency,
   * we use the same approach for stubs that don't use match pairs.
   *
   *                                    +---------------+
   *                                    | Saved frameptr|
   *                                    | Return address|
   *        Current frame               +---------------+
   *------------------------------------------------------------
   *        Caller's frame              +---------------+
   *                                    |InputOutputData|
   *          inputStartAddress +---------->  inputStart|
   *            inputEndAddress +---------->    inputEnd|
   *          startIndexAddress +---------->  startIndex|
   *             matchesAddress +---------->     matches|-----+
   *                                    +---------------+     |
   * matchPairs(Address|Offset) +-----> +---------------+  <--+
   *                                    |  MatchPairs   |
   *           pairCountAddress +---------->    count   |
   *        pairsPointerAddress +---------->    pairs   |-----+
   *                                    +---------------+     |
   * pairsArray(Address|Offset) +-----> +---------------+  <--+
   *                                    |   MatchPair   |
   *     firstMatchStartAddress +---------->    start   |  <--+
   *                                    |       limit   |     |
   *                                    +---------------+     |
   *                                           .              |
   *                                           .  Reserved space for
   *                                           .  RegExpObject::MaxPairCount
   *                                           .  MatchPair objects
   *                                           .              |
   *                                    +---------------+     |
   *                                    |   MatchPair   |     |
   *                                    |       start   |     |
   *                                    |       limit   |  <--+
   *                                    +---------------+
   */

  // Frame-pointer-relative offsets of the InputOutputData, the MatchPairs,
  // and the MatchPair array, laid out consecutively as pictured above.
  int32_t ioOffset = RegExpInputOutputDataOffset;
  int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
  int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));

  Address inputStartAddress(FramePointer,
                            ioOffset + InputOutputData::offsetOfInputStart());
  Address inputEndAddress(FramePointer,
                          ioOffset + InputOutputData::offsetOfInputEnd());
  Address startIndexAddress(FramePointer,
                            ioOffset + InputOutputData::offsetOfStartIndex());
  Address matchesAddress(FramePointer,
                         ioOffset + InputOutputData::offsetOfMatches());

  Address matchPairsAddress(FramePointer, matchPairsOffset);
  Address pairCountAddress(FramePointer,
                           matchPairsOffset + MatchPairs::offsetOfPairCount());
  Address pairsPointerAddress(FramePointer,
                              matchPairsOffset + MatchPairs::offsetOfPairs());

  Address pairsArrayAddress(FramePointer, pairsArrayOffset);
  Address firstMatchStartAddress(FramePointer,
                                 pairsArrayOffset + MatchPair::offsetOfStart());

  // First, fill in a skeletal MatchPairs instance on the stack. This will be
  // passed to the OOL stub in the caller if we aren't able to execute the
  // RegExp inline, and that stub needs to be able to determine whether the
  // execution finished successfully.

  // Initialize MatchPairs::pairCount to 1. The correct value can only
  // be determined after loading the RegExpShared. If the RegExpShared
  // has Kind::Atom, this is the correct pairCount.
  masm.store32(Imm32(1), pairCountAddress);

  // Initialize MatchPairs::pairs pointer
  masm.computeEffectiveAddress(pairsArrayAddress, temp1);
  masm.storePtr(temp1, pairsPointerAddress);

  // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch
  masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);

  // Determine the set of volatile inputs to save when calling into C++ or
  // regexp code.
  LiveGeneralRegisterSet volatileRegs;
  if (lastIndex.volatile_()) {
    volatileRegs.add(lastIndex);
  }
  if (input.volatile_()) {
    volatileRegs.add(input);
  }
  if (regexp.volatile_()) {
    volatileRegs.add(regexp);
  }

  // Ensure the input string is not a rope.
  Label isLinear;
  masm.branchIfNotRope(input, &isLinear);
  {
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(temp1);
    masm.passABIArg(input);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();

    MOZ_ASSERT(!volatileRegs.has(temp1));
    masm.storeCallPointerResult(temp1);
    masm.PopRegsInMask(volatileRegs);

    // A null result means the rope could not be linearized; bail out.
    masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
  }
  masm.bind(&isLinear);

  // Load the RegExpShared. An undefined slot means no RegExpShared is
  // attached yet, so jump to |failure| and let the OOL path handle it.
  Register regexpReg = temp1;
  Address sharedSlot = Address(
      regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
  masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
  masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);

  // Handle Atom matches: these are performed by a C++ helper instead of
  // compiled regexp code. The helper returns a RegExpRunStatus in temp1.
  Label notAtom, checkSuccess;
  masm.branchPtr(Assembler::Equal,
                 Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
                 ImmWord(0), &notAtom);
  {
    masm.computeEffectiveAddress(matchPairsAddress, temp3);

    masm.PushRegsInMask(volatileRegs);
    using Fn =
        RegExpRunStatus (*)(RegExpShared* re, const JSLinearString* input,
                            size_t start, MatchPairs* matchPairs);
    masm.setupUnalignedABICall(temp2);
    masm.passABIArg(regexpReg);
    masm.passABIArg(input);
    masm.passABIArg(lastIndex);
    masm.passABIArg(temp3);
    masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();

    MOZ_ASSERT(!volatileRegs.has(temp1));
    masm.storeCallInt32Result(temp1);
    masm.PopRegsInMask(volatileRegs);

    masm.jump(&checkSuccess);
  }
  masm.bind(&notAtom);

  // If we don't need to look at the capture groups, we can leave pairCount at 1
  // (set above). The regexp code is special-cased to skip copying capture
  // groups if the pair count is 1, which also lets us avoid having to allocate
  // memory to store them.
  bool skipMatchPairs = kind == JitZone::StubKind::RegExpSearcher ||
                        kind == JitZone::StubKind::RegExpExecTest;
  if (!skipMatchPairs) {
    // Don't handle regexps with too many capture pairs.
    masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
    masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
                  failure);

    // Fill in the pair count in the MatchPairs on the stack.
    masm.store32(temp2, pairCountAddress);
  }

  // Load code pointer and length of input (in bytes).
  // Store the input start in the InputOutputData.
  Register codePointer = temp1;  // Note: temp1 was previously regexpReg.
  Register byteLength = temp3;
  {
    Label isLatin1, done;
    masm.loadStringLength(input, byteLength);

    masm.branchLatin1String(input, &isLatin1);

    // Two-byte input
    masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
    masm.storePtr(temp2, inputStartAddress);
    masm.loadPtr(
        Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
        codePointer);
    // Two bytes per char: double the char count to get the byte length.
    masm.lshiftPtr(Imm32(1), byteLength);
    masm.jump(&done);

    // Latin1 input
    masm.bind(&isLatin1);
    masm.loadStringChars(input, temp2, CharEncoding::Latin1);
    masm.storePtr(temp2, inputStartAddress);
    masm.loadPtr(
        Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
        codePointer);

    masm.bind(&done);

    // Store end pointer
    masm.addPtr(byteLength, temp2);
    masm.storePtr(temp2, inputEndAddress);
  }

  // Guard that the RegExpShared has been compiled for this type of input.
  // If it has not been compiled, we fall back to the OOL case, which will
  // do a VM call into the interpreter.
  // TODO: add an interpreter trampoline?
  masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
  masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);

  // Finish filling in the InputOutputData instance on the stack
  masm.computeEffectiveAddress(matchPairsAddress, temp2);
  masm.storePtr(temp2, matchesAddress);
  masm.storePtr(lastIndex, startIndexAddress);

  // Execute the RegExp.
  masm.computeEffectiveAddress(Address(FramePointer, ioOffset), temp2);
  masm.PushRegsInMask(volatileRegs);
  masm.setupUnalignedABICall(temp3);
  masm.passABIArg(temp2);
  masm.callWithABI(codePointer);
  masm.storeCallInt32Result(temp1);
  masm.PopRegsInMask(volatileRegs);

  // temp1 holds the RegExpRunStatus from either the atom path or the
  // compiled regexp code; dispatch on it.
  masm.bind(&checkSuccess);
  masm.branch32(Assembler::Equal, temp1,
                Imm32(int32_t(RegExpRunStatus::Success_NotFound)), notFound);
  masm.branch32(Assembler::Equal, temp1, Imm32(int32_t(RegExpRunStatus::Error)),
                failure);

  // Lazily update the RegExpStatics.
  size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
                  RegExpRealm::offsetOfRegExpStatics();
  masm.loadGlobalObjectData(temp1);
  masm.loadPtr(Address(temp1, offset), temp1);
  UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
                      initialStringHeap, volatileRegs);

  return true;
}
   2544 
   2545 // Shift a bit within a 32-bit word from one bit position to another.
   2546 // Both FromBitMask and ToBitMask must have a single bit set.
   2547 template <uint32_t FromBitMask, uint32_t ToBitMask>
   2548 static void ShiftFlag32(MacroAssembler& masm, Register reg) {
   2549  static_assert(mozilla::IsPowerOfTwo(FromBitMask));
   2550  static_assert(mozilla::IsPowerOfTwo(ToBitMask));
   2551  static_assert(FromBitMask != ToBitMask);
   2552  constexpr uint32_t fromShift = mozilla::CountTrailingZeroes32(FromBitMask);
   2553  constexpr uint32_t toShift = mozilla::CountTrailingZeroes32(ToBitMask);
   2554  if (fromShift < toShift) {
   2555    masm.lshift32(Imm32(toShift - fromShift), reg);
   2556  } else {
   2557    masm.rshift32(Imm32(fromShift - toShift), reg);
   2558  }
   2559 }
   2560 
// Store the base-string field of the freshly created dependent string
// |dependent|. If |base| is itself a dependent string, we link to |base|'s
// own base instead, avoiding chains of dependent strings in most cases. The
// chosen base gets its DEPENDED_ON_BIT set unless it is an atom.
//
// |temp1| and |temp2| are clobbered. If |needsPostBarrier| is true, a post
// write barrier is emitted for the base store (required when |dependent| is
// tenured but the stored base is in the nursery); otherwise debug builds
// assert that no such barrier would have been needed.
static void EmitInitDependentStringBase(MacroAssembler& masm,
                                        Register dependent, Register base,
                                        Register temp1, Register temp2,
                                        bool needsPostBarrier) {
  // Determine the base string to use and store it in temp2.
  Label notDependent, markedDependedOn;
  masm.load32(Address(base, JSString::offsetOfFlags()), temp1);
  masm.branchTest32(Assembler::Zero, temp1, Imm32(JSString::DEPENDENT_BIT),
                    &notDependent);
  {
    // The base is also a dependent string. Load its base to prevent chains of
    // dependent strings in most cases. This must either be an atom or already
    // have the DEPENDED_ON_BIT set.
    masm.loadDependentStringBase(base, temp2);
    masm.jump(&markedDependedOn);
  }
  masm.bind(&notDependent);
  {
    // The base is not a dependent string. Set the DEPENDED_ON_BIT if it's not
    // an atom (ATOM_BIT is not set). Roughly:
    //
    //   flags |= ((~flags) & ATOM_BIT) << (DEPENDED_ON_BIT - ATOM_BIT))
    //
    // but further modified to combine the initial move with an OR:
    //
    //   flags |= ~(flags | ~ATOM_BIT) << (DEPENDED_ON_BIT - ATOM_BIT)
    //
    masm.or32(Imm32(~JSString::ATOM_BIT), temp1, temp2);
    masm.not32(temp2);
    ShiftFlag32<JSString::ATOM_BIT, JSString::DEPENDED_ON_BIT>(masm, temp2);
    masm.or32(temp2, temp1);
    masm.movePtr(base, temp2);
    masm.store32(temp1, Address(temp2, JSString::offsetOfFlags()));
  }
  masm.bind(&markedDependedOn);

#ifdef DEBUG
  // Assert the base has the DEPENDED_ON_BIT set or is an atom.
  Label isAppropriatelyMarked;
  masm.branchTest32(Assembler::NonZero,
                    Address(temp2, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT | JSString::DEPENDED_ON_BIT),
                    &isAppropriatelyMarked);
  masm.assumeUnreachable("Base string is missing DEPENDED_ON_BIT");
  masm.bind(&isAppropriatelyMarked);
#endif
  masm.storeDependentStringBase(temp2, dependent);

  // Post-barrier the base store. The base is still in temp2.
  if (needsPostBarrier) {
    Label done;
    // No barrier is needed if |dependent| itself is in the nursery, or if
    // the stored base is not in the nursery.
    masm.branchPtrInNurseryChunk(Assembler::Equal, dependent, temp1, &done);
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp2, temp1, &done);

    LiveRegisterSet regsToSave(RegisterSet::Volatile());
    regsToSave.takeUnchecked(temp1);
    regsToSave.takeUnchecked(temp2);

    masm.PushRegsInMask(regsToSave);

    masm.mov(ImmPtr(masm.runtime()), temp1);

    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.setupUnalignedABICall(temp2);
    masm.passABIArg(temp1);
    masm.passABIArg(dependent);
    masm.callWithABI<Fn, PostWriteBarrier>();

    masm.PopRegsInMask(regsToSave);

    masm.bind(&done);
  } else {
#ifdef DEBUG
    // Verify the caller's claim: a tenured |dependent| must not end up
    // pointing at a nursery base when no barrier was emitted.
    Label done;
    masm.branchPtrInNurseryChunk(Assembler::Equal, dependent, temp1, &done);
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp2, temp1, &done);
    masm.assumeUnreachable("Missing post barrier for dependent string base");
    masm.bind(&done);
#endif
  }
}
   2642 
   2643 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
   2644                            Register len, Register byteOpScratch,
   2645                            CharEncoding encoding,
   2646                            size_t maximumLength = SIZE_MAX);
   2647 
// Emits code that creates a dependent string from a base string and a
// [start, limit) index range read from the MatchPairs on the stack. Empty
// matches reuse the empty atom, short matches are copied into thin/fat
// inline strings (or, for single Latin-1 chars, loaded from the static
// strings table), and everything else becomes a true dependent string.
class CreateDependentString {
  CharEncoding encoding_;  // Encoding of the base string's characters.
  Register string_;        // Receives the resulting string.
  Register temp1_;         // Scratch; clobbered by generate().
  Register temp2_;         // Scratch; clobbered by generate().
  Label* failure_;         // Taken when the OOL allocation returns null.

  // Which out-of-line allocation path a given allocation site uses.
  enum class FallbackKind : uint8_t {
    InlineString,
    FatInlineString,
    NotInlineString,
    Count
  };
  // Entry labels of the OOL allocation paths (bound by generateFallback)
  // and the labels at which each path rejoins the fast path.
  mozilla::EnumeratedArray<FallbackKind, Label, size_t(FallbackKind::Count)>
      fallbacks_, joins_;

 public:
  CreateDependentString(CharEncoding encoding, Register string, Register temp1,
                        Register temp2, Label* failure)
      : encoding_(encoding),
        string_(string),
        temp1_(temp1),
        temp2_(temp2),
        failure_(failure) {}

  Register string() const { return string_; }
  CharEncoding encoding() const { return encoding_; }

  // Generate code that creates DependentString.
  // Caller should call generateFallback after masm.ret(), to generate
  // fallback path.
  void generate(MacroAssembler& masm, const JSAtomState& names,
                CompileRuntime* runtime, Register base,
                BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
                gc::Heap initialStringHeap);

  // Generate fallback path for creating DependentString.
  void generateFallback(MacroAssembler& masm);
};
   2687 
// Emit the fast paths for creating the dependent string. On inline-allocation
// failure, control flows to fallbacks_[kind]; generateFallback() emits those
// out-of-line paths, which rejoin at joins_[kind]. The result is left in
// |string_|; |temp1_|, |temp2_| and (temporarily) |base| are used as scratch.
void CreateDependentString::generate(MacroAssembler& masm,
                                     const JSAtomState& names,
                                     CompileRuntime* runtime, Register base,
                                     BaseIndex startIndexAddress,
                                     BaseIndex limitIndexAddress,
                                     gc::Heap initialStringHeap) {
  JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // Allocate a string of the given kind into |string_| and store its initial
  // flags. Jumps to fallbacks_[kind] when inline allocation fails.
  auto newGCString = [&](FallbackKind kind) {
    uint32_t flags = kind == FallbackKind::InlineString
                         ? JSString::INIT_THIN_INLINE_FLAGS
                     : kind == FallbackKind::FatInlineString
                         ? JSString::INIT_FAT_INLINE_FLAGS
                         : JSString::INIT_DEPENDENT_FLAGS;
    if (encoding_ == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }

    if (kind != FallbackKind::FatInlineString) {
      masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
    } else {
      masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
                                &fallbacks_[kind]);
    }
    masm.bind(&joins_[kind]);
    masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
  };

  // Compute the string length: temp1_ = limit - start.
  masm.load32(startIndexAddress, temp2_);
  masm.load32(limitIndexAddress, temp1_);
  masm.sub32(temp2_, temp1_);

  Label done, nonEmpty;

  // Zero length matches use the empty string.
  masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
  masm.movePtr(ImmGCPtr(names.empty_), string_);
  masm.jump(&done);

  masm.bind(&nonEmpty);

  // Complete matches use the base string.
  Label nonBaseStringMatch;
  masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
  masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
                temp1_, &nonBaseStringMatch);
  masm.movePtr(base, string_);
  masm.jump(&done);

  masm.bind(&nonBaseStringMatch);

  Label notInline;

  int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
                                ? JSFatInlineString::MAX_LENGTH_LATIN1
                                : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
  masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
  {
    // Make a thin or fat inline string.
    Label stringAllocated, fatInline;

    int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
                                      ? JSThinInlineString::MAX_LENGTH_LATIN1
                                      : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
    masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
                  &fatInline);
    if (encoding_ == CharEncoding::Latin1) {
      // One character Latin-1 strings can be loaded directly from the
      // static strings table.
      Label thinInline;
      masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
      {
        static_assert(
            StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
            "Latin-1 strings can be loaded from static strings");

        masm.loadStringChars(base, temp1_, encoding_);
        masm.loadChar(temp1_, temp2_, temp1_, encoding_);

        masm.lookupStaticString(temp1_, string_, runtime->staticStrings());

        masm.jump(&done);
      }
      masm.bind(&thinInline);
    }
    {
      newGCString(FallbackKind::InlineString);
      masm.jump(&stringAllocated);
    }
    masm.bind(&fatInline);
    {
      newGCString(FallbackKind::FatInlineString);
    }
    masm.bind(&stringAllocated);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    // Spill |string_| and |base| so they can be reused as scratch while
    // copying the characters below.
    masm.push(string_);
    masm.push(base);

    MOZ_ASSERT(startIndexAddress.base == FramePointer,
               "startIndexAddress is still valid after stack pushes");

    // Load chars pointer for the new string.
    masm.loadInlineStringCharsForStore(string_, string_);

    // Load the source characters pointer.
    masm.loadStringChars(base, temp2_, encoding_);
    masm.load32(startIndexAddress, base);
    masm.addToCharPtr(temp2_, base, encoding_);

    CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);

    masm.pop(base);
    masm.pop(string_);

    masm.jump(&done);
  }

  masm.bind(&notInline);

  {
    // Make a dependent string.
    // Warning: string may be tenured (if the fallback case is hit), so
    // stores into it must be post barriered.
    newGCString(FallbackKind::NotInlineString);

    masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

    // Point the new string's chars at base's chars + start index.
    masm.loadNonInlineStringChars(base, temp1_, encoding_);
    masm.load32(startIndexAddress, temp2_);
    masm.addToCharPtr(temp1_, temp2_, encoding_);
    masm.storeNonInlineStringChars(temp1_, string_);

    EmitInitDependentStringBase(masm, string_, base, temp1_, temp2_,
                                /* needsPostBarrier = */ true);
  }

  masm.bind(&done);
}
   2830 
// Emit the out-of-line allocation paths for every FallbackKind. Each path
// saves the volatile registers, calls into the VM to allocate the string,
// bails out to |failure_| when the allocation returns null, and then jumps
// back to the corresponding joins_ label on the fast path in generate().
void CreateDependentString::generateFallback(MacroAssembler& masm) {
  JitSpew(JitSpew_Codegen,
          "# Emitting CreateDependentString fallback (encoding=%s)",
          (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // |string_| holds the result and |temp2_| is rewritten by the join point,
  // so neither needs to be preserved across the VM call.
  LiveRegisterSet regsToSave(RegisterSet::Volatile());
  regsToSave.takeUnchecked(string_);
  regsToSave.takeUnchecked(temp2_);

  for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
    masm.bind(&fallbacks_[kind]);

    masm.PushRegsInMask(regsToSave);

    using Fn = void* (*)(JSContext * cx);
    masm.setupUnalignedABICall(string_);
    masm.loadJSContext(string_);
    masm.passABIArg(string_);
    if (kind == FallbackKind::FatInlineString) {
      masm.callWithABI<Fn, AllocateFatInlineString>();
    } else {
      masm.callWithABI<Fn, AllocateDependentString>();
    }
    masm.storeCallPointerResult(string_);

    masm.PopRegsInMask(regsToSave);

    // Null result means the VM allocation failed (OOM).
    masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);

    masm.jump(&joins_[kind]);
  }
}
   2863 
   2864 // Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
   2865 // but RegExpExecMatch also has to load and update .lastIndex for global/sticky
   2866 // regular expressions.
   2867 static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
   2868                                              gc::Heap initialStringHeap,
   2869                                              JitZone::StubKind kind) {
   2870  bool isExecMatch = kind == JitZone::StubKind::RegExpExecMatch;
   2871  MOZ_ASSERT_IF(!isExecMatch, kind == JitZone::StubKind::RegExpMatcher);
   2872 
   2873  if (isExecMatch) {
   2874    JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
   2875  } else {
   2876    JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
   2877  }
   2878 
   2879  // |initialStringHeap| could be stale after a GC.
   2880  JS::AutoCheckCannotGC nogc(cx);
   2881 
   2882  Register regexp = RegExpMatcherRegExpReg;
   2883  Register input = RegExpMatcherStringReg;
   2884  Register lastIndex = RegExpMatcherLastIndexReg;
   2885  ValueOperand result = JSReturnOperand;
   2886 
   2887  // We are free to clobber all registers, as LRegExpMatcher is a call
   2888  // instruction.
   2889  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
   2890  regs.take(input);
   2891  regs.take(regexp);
   2892  regs.take(lastIndex);
   2893 
   2894  Register temp1 = regs.takeAny();
   2895  Register temp2 = regs.takeAny();
   2896  Register temp3 = regs.takeAny();
   2897  Register maybeTemp4 = InvalidReg;
   2898  if (!regs.empty()) {
   2899    // There are not enough registers on x86.
   2900    maybeTemp4 = regs.takeAny();
   2901  }
   2902  Register maybeTemp5 = InvalidReg;
   2903  if (!regs.empty()) {
   2904    // There are not enough registers on x86.
   2905    maybeTemp5 = regs.takeAny();
   2906  }
   2907 
   2908  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
   2909  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
   2910 
   2911  TempAllocator temp(&cx->tempLifoAlloc());
   2912  JitContext jcx(cx);
   2913  StackMacroAssembler masm(cx, temp);
   2914  AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");
   2915 
   2916 #ifdef JS_USE_LINK_REGISTER
   2917  masm.pushReturnAddress();
   2918 #endif
   2919  masm.push(FramePointer);
   2920  masm.moveStackPtrTo(FramePointer);
   2921 
   2922  Label notFoundZeroLastIndex;
   2923  if (isExecMatch) {
   2924    masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
   2925  }
   2926 
   2927  Label notFound, oolEntry;
   2928  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
   2929                               temp3, initialStringHeap, &notFound, &oolEntry,
   2930                               kind)) {
   2931    return nullptr;
   2932  }
   2933 
   2934  // If a regexp has named captures, fall back to the OOL stub, which
   2935  // will end up calling CreateRegExpMatchResults.
   2936  Register shared = temp2;
   2937  masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
   2938                                          RegExpObject::SHARED_SLOT)),
   2939                      shared, JSVAL_TYPE_PRIVATE_GCTHING);
   2940  masm.branchPtr(Assembler::NotEqual,
   2941                 Address(shared, RegExpShared::offsetOfGroupsTemplate()),
   2942                 ImmWord(0), &oolEntry);
   2943 
   2944  // Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
   2945  masm.branchTest32(Assembler::NonZero,
   2946                    Address(shared, RegExpShared::offsetOfFlags()),
   2947                    Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);
   2948 
   2949  Address pairCountAddress = RegExpPairCountAddress();
   2950 
   2951  // Construct the result.
   2952  Register object = temp1;
   2953  {
   2954    // In most cases, the array will have just 1-2 elements, so we optimize for
   2955    // that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
   2956    // because two slots are used for the elements header).
   2957 
   2958    // Load the array length in temp2 and the shape in temp3.
   2959    Label allocated;
   2960    masm.load32(pairCountAddress, temp2);
   2961    size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
   2962                    RegExpRealm::offsetOfNormalMatchResultShape();
   2963    masm.loadGlobalObjectData(temp3);
   2964    masm.loadPtr(Address(temp3, offset), temp3);
   2965 
   2966    auto emitAllocObject = [&](size_t elementCapacity) {
   2967      gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
   2968      MOZ_ASSERT(gc::GetObjectFinalizeKind(&ArrayObject::class_) ==
   2969                 gc::FinalizeKind::None);
   2970      MOZ_ASSERT(!IsFinalizedKind(kind));
   2971 
   2972 #ifdef DEBUG
   2973      // Assert all of the available slots are used for |elementCapacity|
   2974      // elements.
   2975      size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
   2976      MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
   2977 #endif
   2978 
   2979      constexpr size_t numUsedDynamicSlots =
   2980          RegExpRealm::MatchResultObjectSlotSpan;
   2981      constexpr size_t numDynamicSlots =
   2982          RegExpRealm::MatchResultObjectNumDynamicSlots;
   2983      constexpr size_t arrayLength = 1;
   2984      masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
   2985                                        arrayLength, elementCapacity,
   2986                                        numUsedDynamicSlots, numDynamicSlots,
   2987                                        kind, gc::Heap::Default, &oolEntry);
   2988    };
   2989 
   2990    Label moreThan2;
   2991    masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
   2992    emitAllocObject(2);
   2993    masm.jump(&allocated);
   2994 
   2995    Label moreThan6;
   2996    masm.bind(&moreThan2);
   2997    masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
   2998    emitAllocObject(6);
   2999    masm.jump(&allocated);
   3000 
   3001    masm.bind(&moreThan6);
   3002    static_assert(RegExpObject::MaxPairCount == 14);
   3003    emitAllocObject(RegExpObject::MaxPairCount);
   3004 
   3005    masm.bind(&allocated);
   3006  }
   3007 
   3008  static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
   3009                "MatchPair consists of two int32 values representing the start"
   3010                "and the end offset of the match");
   3011 
   3012  int32_t pairsVectorStartOffset = RegExpPairsVectorStartOffset;
   3013 
   3014  // Incremented by one below for each match pair.
   3015  Register matchIndex = temp2;
   3016  masm.move32(Imm32(0), matchIndex);
   3017 
   3018  // The element in which to store the result of the current match.
   3019  size_t elementsOffset = NativeObject::offsetOfFixedElements();
   3020  BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);
   3021 
   3022  // The current match pair's "start" and "limit" member.
   3023  BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
   3024                           pairsVectorStartOffset + MatchPair::offsetOfStart());
   3025  BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
   3026                           pairsVectorStartOffset + MatchPair::offsetOfLimit());
   3027 
   3028  Label* depStrFailure = &oolEntry;
   3029  Label restoreRegExpAndLastIndex;
   3030 
   3031  Register temp4;
   3032  if (maybeTemp4 == InvalidReg) {
   3033    depStrFailure = &restoreRegExpAndLastIndex;
   3034 
   3035    // We don't have enough registers for a fourth temporary. Reuse |regexp|
   3036    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
   3037    masm.push(regexp);
   3038    temp4 = regexp;
   3039  } else {
   3040    temp4 = maybeTemp4;
   3041  }
   3042 
   3043  Register temp5;
   3044  if (maybeTemp5 == InvalidReg) {
   3045    depStrFailure = &restoreRegExpAndLastIndex;
   3046 
   3047    // We don't have enough registers for a fifth temporary. Reuse |lastIndex|
   3048    // as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
   3049    masm.push(lastIndex);
   3050    temp5 = lastIndex;
   3051  } else {
   3052    temp5 = maybeTemp5;
   3053  }
   3054 
   3055  auto maybeRestoreRegExpAndLastIndex = [&]() {
   3056    if (maybeTemp5 == InvalidReg) {
   3057      masm.pop(lastIndex);
   3058    }
   3059    if (maybeTemp4 == InvalidReg) {
   3060      masm.pop(regexp);
   3061    }
   3062  };
   3063 
   3064  // Loop to construct the match strings. There are two different loops,
   3065  // depending on whether the input is a Two-Byte or a Latin-1 string.
   3066  CreateDependentString depStrs[]{
   3067      {CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
   3068      {CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
   3069  };
   3070 
   3071  {
   3072    Label isLatin1, done;
   3073    masm.branchLatin1String(input, &isLatin1);
   3074 
   3075    for (auto& depStr : depStrs) {
   3076      if (depStr.encoding() == CharEncoding::Latin1) {
   3077        masm.bind(&isLatin1);
   3078      }
   3079 
   3080      Label matchLoop;
   3081      masm.bind(&matchLoop);
   3082 
   3083      static_assert(MatchPair::NoMatch == -1,
   3084                    "MatchPair::start is negative if no match was found");
   3085 
   3086      Label isUndefined, storeDone;
   3087      masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
   3088                    &isUndefined);
   3089      {
   3090        depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
   3091                        input, matchPairStart, matchPairLimit,
   3092                        initialStringHeap);
   3093 
   3094        // Storing into nursery-allocated results object's elements; no post
   3095        // barrier.
   3096        masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
   3097        masm.jump(&storeDone);
   3098      }
   3099      masm.bind(&isUndefined);
   3100      {
   3101        masm.storeValue(UndefinedValue(), objectMatchElement);
   3102      }
   3103      masm.bind(&storeDone);
   3104 
   3105      masm.add32(Imm32(1), matchIndex);
   3106      masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
   3107                    &done);
   3108      masm.jump(&matchLoop);
   3109    }
   3110 
   3111 #ifdef DEBUG
   3112    masm.assumeUnreachable("The match string loop doesn't fall through.");
   3113 #endif
   3114 
   3115    masm.bind(&done);
   3116  }
   3117 
   3118  maybeRestoreRegExpAndLastIndex();
   3119 
   3120  // Fill in the rest of the output object.
   3121  masm.store32(
   3122      matchIndex,
   3123      Address(object,
   3124              elementsOffset + ObjectElements::offsetOfInitializedLength()));
   3125  masm.store32(
   3126      matchIndex,
   3127      Address(object, elementsOffset + ObjectElements::offsetOfLength()));
   3128 
   3129  Address firstMatchPairStartAddress(
   3130      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
   3131  Address firstMatchPairLimitAddress(
   3132      FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());
   3133 
   3134  static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
   3135                "First slot holds the 'index' property");
   3136  static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
   3137                "Second slot holds the 'input' property");
   3138 
   3139  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
   3140 
   3141  masm.load32(firstMatchPairStartAddress, temp3);
   3142  masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));
   3143 
   3144  // No post barrier needed (address is within nursery object.)
   3145  masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));
   3146 
   3147  // For the ExecMatch stub, if the regular expression is global or sticky, we
   3148  // have to update its .lastIndex slot.
   3149  if (isExecMatch) {
   3150    MOZ_ASSERT(object != lastIndex);
   3151    Label notGlobalOrSticky;
   3152    masm.branchTest32(Assembler::Zero, flagsSlot,
   3153                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
   3154                      &notGlobalOrSticky);
   3155    masm.load32(firstMatchPairLimitAddress, lastIndex);
   3156    masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
   3157    masm.bind(&notGlobalOrSticky);
   3158  }
   3159 
   3160  // All done!
   3161  masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
   3162  masm.pop(FramePointer);
   3163  masm.ret();
   3164 
   3165  masm.bind(&notFound);
   3166  if (isExecMatch) {
   3167    Label notGlobalOrSticky;
   3168    masm.branchTest32(Assembler::Zero, flagsSlot,
   3169                      Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
   3170                      &notGlobalOrSticky);
   3171    masm.bind(&notFoundZeroLastIndex);
   3172    masm.storeValue(Int32Value(0), lastIndexSlot);
   3173    masm.bind(&notGlobalOrSticky);
   3174  }
   3175  masm.moveValue(NullValue(), result);
   3176  masm.pop(FramePointer);
   3177  masm.ret();
   3178 
   3179  // Fallback paths for CreateDependentString.
   3180  for (auto& depStr : depStrs) {
   3181    depStr.generateFallback(masm);
   3182  }
   3183 
   3184  // Fall-through to the ool entry after restoring the registers.
   3185  masm.bind(&restoreRegExpAndLastIndex);
   3186  maybeRestoreRegExpAndLastIndex();
   3187 
   3188  // Use an undefined value to signal to the caller that the OOL stub needs to
   3189  // be called.
   3190  masm.bind(&oolEntry);
   3191  masm.moveValue(UndefinedValue(), result);
   3192  masm.pop(FramePointer);
   3193  masm.ret();
   3194 
   3195  Linker linker(masm);
   3196  JitCode* code = linker.newCode(cx, CodeKind::Other);
   3197  if (!code) {
   3198    return nullptr;
   3199  }
   3200 
   3201  const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
   3202  CollectPerfSpewerJitCodeProfile(code, name);
   3203 #ifdef MOZ_VTUNE
   3204  vtune::MarkStub(code, name);
   3205 #endif
   3206 
   3207  return code;
   3208 }
   3209 
   3210 JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
   3211  return GenerateRegExpMatchStubShared(cx, initialStringHeap,
   3212                                       JitZone::StubKind::RegExpMatcher);
   3213 }
   3214 
   3215 JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
   3216  return GenerateRegExpMatchStubShared(cx, initialStringHeap,
   3217                                       JitZone::StubKind::RegExpExecMatch);
   3218 }
   3219 
// Call the zone's RegExpMatcher stub; on stub failure (undefined result),
// fall back to the RegExpMatcherRaw VM call.
void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
  // Inputs must already be in the fixed registers the shared stub expects.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // The input registers must not alias the boxed return-value registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
  static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
#endif

  // Reserve stack space used by the stub; freed at the end of this function.
  masm.reserveStack(RegExpReservedStack);

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    Register lastIndex = ToRegister(lir->lastIndex());
    Register input = ToRegister(lir->string());
    Register regexp = ToRegister(lir->regexp());

    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(lastIndex);
    regs.take(input);
    regs.take(regexp);
    Register temp = regs.takeAny();

    // The MatchPairs live on the stack just past the InputOutputData.
    masm.computeEffectiveAddress(
        Address(masm.getStackPointer(), InputOutputDataSize), temp);

    // Arguments are pushed last-to-first for the VM call below.
    pushArg(temp);
    pushArg(lastIndex);
    pushArg(input);
    pushArg(regexp);

    // We are not using oolCallVM because this is a call instruction, so any
    // live registers have already been saved by the register allocator.
    using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
                        int32_t lastIndex, MatchPairs* pairs,
                        MutableHandleValue output);
    callVM<Fn, RegExpMatcherRaw>(lir);

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  JitCode* regExpMatcherStub =
      snapshot_->getZoneStub(JitZone::StubKind::RegExpMatcher);
  masm.call(regExpMatcherStub);
  // The stub returns UndefinedValue to signal that the VM fallback must run.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
   3279 
// Call the zone's RegExpExecMatch stub; on stub failure (undefined result),
// fall back to the RegExpBuiltinExecMatchFromJit VM call.
void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
  // Inputs must already be in the fixed registers the shared stub expects.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
  MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);

  // The input registers must not alias the boxed return-value registers.
#if defined(JS_NUNBOX32)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
  static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
  static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  static_assert(RegExpMatcherRegExpReg != JSReturnReg);
  static_assert(RegExpMatcherStringReg != JSReturnReg);
#endif

  // Reserve stack space used by the stub; freed at the end of this function.
  masm.reserveStack(RegExpReservedStack);

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    Register input = ToRegister(lir->string());
    Register regexp = ToRegister(lir->regexp());

    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(regexp);
    Register temp = regs.takeAny();

    // The MatchPairs live on the stack just past the InputOutputData.
    masm.computeEffectiveAddress(
        Address(masm.getStackPointer(), InputOutputDataSize), temp);

    // Arguments are pushed last-to-first for the VM call below.
    pushArg(temp);
    pushArg(input);
    pushArg(regexp);

    // We are not using oolCallVM because we are in a Call and live registers
    // have already been saved by the register allocator.
    using Fn =
        bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
                 MatchPairs* pairs, MutableHandleValue output);
    callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  JitCode* regExpExecMatchStub =
      snapshot_->getZoneStub(JitZone::StubKind::RegExpExecMatch);
  masm.call(regExpExecMatchStub);
  // The stub returns UndefinedValue to signal that the VM fallback must run.
  masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());

  masm.bind(ool->rejoin());
  masm.freeStack(RegExpReservedStack);
}
   3331 
// Generate the RegExpSearcher stub. On success it stores the match limit to
// cx->regExpSearcherLastLimit and returns the match start index in ReturnReg;
// otherwise it returns RegExpSearcherResultNotFound (no match) or
// RegExpSearcherResultFailed (caller must retry in C++).
JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");

  Register regexp = RegExpSearcherRegExpReg;
  Register input = RegExpSearcherStringReg;
  Register lastIndex = RegExpSearcherLastIndexReg;
  Register result = ReturnReg;

  // We are free to clobber all registers, as LRegExpSearcher is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);
  regs.take(lastIndex);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

#ifdef DEBUG
  // Store sentinel value to cx->regExpSearcherLastLimit.
  // See comment in RegExpSearcherImpl.
  masm.loadJSContext(temp1);
  masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
               Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()));
#endif

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, initialStringHeap, &notFound, &oolEntry,
                               JitZone::StubKind::RegExpSearcher)) {
    return nullptr;
  }

  // The first MatchPair is stored in the frame's pairs vector.
  int32_t pairsVectorStartOffset = RegExpPairsVectorStartOffset;
  Address matchPairStart(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfStart());
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Store match limit to cx->regExpSearcherLastLimit and return the index.
  // |input| is no longer needed at this point, so reuse it as a scratch
  // register for the JSContext pointer.
  masm.load32(matchPairLimit, result);
  masm.loadJSContext(input);
  masm.store32(result,
               Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
  masm.load32(matchPairStart, result);
  masm.pop(FramePointer);
  masm.ret();

  // No match: return the not-found sentinel.
  masm.bind(&notFound);
  masm.move32(Imm32(RegExpSearcherResultNotFound), result);
  masm.pop(FramePointer);
  masm.ret();

  // Stub can't handle this case: return the failure sentinel so the caller
  // retries in C++.
  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpSearcherResultFailed), result);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpSearcherStub");
#endif

  return code;
}
   3415 
// Call the zone's RegExpSearcher stub; on RegExpSearcherResultFailed, fall
// back to the RegExpSearcherRaw VM call.
void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
  // Inputs must already be in the fixed registers the stub expects.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
  MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // The input registers must not alias the return register.
  static_assert(RegExpSearcherRegExpReg != ReturnReg);
  static_assert(RegExpSearcherStringReg != ReturnReg);
  static_assert(RegExpSearcherLastIndexReg != ReturnReg);

  // Reserve stack space used by the stub; freed at the end of this function.
  masm.reserveStack(RegExpReservedStack);

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    Register lastIndex = ToRegister(lir->lastIndex());
    Register input = ToRegister(lir->string());
    Register regexp = ToRegister(lir->regexp());

    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(lastIndex);
    regs.take(input);
    regs.take(regexp);
    Register temp = regs.takeAny();

    // The MatchPairs live on the stack just past the InputOutputData.
    masm.computeEffectiveAddress(
        Address(masm.getStackPointer(), InputOutputDataSize), temp);

    // Arguments are pushed last-to-first for the VM call below.
    pushArg(temp);
    pushArg(lastIndex);
    pushArg(input);
    pushArg(regexp);

    // We are not using oolCallVM because this is a call instruction, so any
    // live registers have already been saved by the register allocator.
    using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
                        int32_t lastIndex, MatchPairs* pairs, int32_t* result);
    callVM<Fn, RegExpSearcherRaw>(lir);

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  JitCode* regExpSearcherStub =
      snapshot_->getZoneStub(JitZone::StubKind::RegExpSearcher);
  masm.call(regExpSearcherStub);
  // The stub returns RegExpSearcherResultFailed to request the VM fallback.
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
                ool->entry());
  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
   3466 
   3467 void CodeGenerator::visitRegExpSearcherLastLimit(
   3468    LRegExpSearcherLastLimit* lir) {
   3469  Register result = ToRegister(lir->output());
   3470  Register scratch = ToRegister(lir->temp0());
   3471 
   3472  masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
   3473 }
   3474 
// Generate the RegExpExecTest stub. It returns 1/0 in ReturnReg for
// found/not-found, or RegExpExecTestResultFailed when the caller must retry
// in C++, updating the regexp's .lastIndex slot for global/sticky regexps.
JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");

  Register regexp = RegExpExecTestRegExpReg;
  Register input = RegExpExecTestStringReg;
  Register result = ReturnReg;

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // We are free to clobber all registers, as LRegExpExecTest is a call
  // instruction.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);
  regs.take(regexp);

  // Ensure lastIndex != result, because lastIndex is still read below after
  // result has been set.
  regs.take(result);
  Register lastIndex = regs.takeAny();
  regs.add(result);
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  Register temp3 = regs.takeAny();

  Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
  Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());

  // Load lastIndex and skip RegExp execution if needed.
  Label notFoundZeroLastIndex;
  masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);

  Label notFound, oolEntry;
  if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
                               temp3, initialStringHeap, &notFound, &oolEntry,
                               JitZone::StubKind::RegExpExecTest)) {
    return nullptr;
  }

  // Set `result` to true/false to indicate found/not-found, or to
  // RegExpExecTestResultFailed if we have to retry in C++. If the regular
  // expression is global or sticky, we also have to update its .lastIndex slot.

  Label done;
  int32_t pairsVectorStartOffset = RegExpPairsVectorStartOffset;
  Address matchPairLimit(FramePointer,
                         pairsVectorStartOffset + MatchPair::offsetOfLimit());

  // Match found: result = 1; for global/sticky, .lastIndex = match limit.
  masm.move32(Imm32(1), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.load32(matchPairLimit, lastIndex);
  masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
  masm.jump(&done);

  // No match: result = 0; for global/sticky, .lastIndex = 0.
  masm.bind(&notFound);
  masm.move32(Imm32(0), result);
  masm.branchTest32(Assembler::Zero, flagsSlot,
                    Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
                    &done);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  // Execution was skipped based on lastIndex: result = 0, .lastIndex = 0.
  masm.bind(&notFoundZeroLastIndex);
  masm.move32(Imm32(0), result);
  masm.storeValue(Int32Value(0), lastIndexSlot);
  masm.jump(&done);

  // Stub can't handle this case: signal the caller to retry in C++.
  masm.bind(&oolEntry);
  masm.move32(Imm32(RegExpExecTestResultFailed), result);

  masm.bind(&done);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);
  if (!code) {
    return nullptr;
  }

  CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "RegExpExecTestStub");
#endif

  return code;
}
   3571 
// Call the zone's RegExpExecTest stub; on RegExpExecTestResultFailed, fall
// back to the RegExpBuiltinExecTestFromJit VM call.
void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
  // Inputs must already be in the fixed registers the stub expects.
  MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
  MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
  MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

  // The input registers must not alias the return register.
  static_assert(RegExpExecTestRegExpReg != ReturnReg);
  static_assert(RegExpExecTestStringReg != ReturnReg);

  // Reserve stack space used by the stub; freed at the end of this function.
  masm.reserveStack(RegExpReservedStack);

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    Register input = ToRegister(lir->string());
    Register regexp = ToRegister(lir->regexp());

    // Arguments are pushed last-to-first for the VM call below.
    pushArg(input);
    pushArg(regexp);

    // We are not using oolCallVM because we are in a Call and live registers
    // have already been saved by the register allocator.
    using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
                        HandleString input, bool* result);
    callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  JitCode* regExpExecTestStub =
      snapshot_->getZoneStub(JitZone::StubKind::RegExpExecTest);
  masm.call(regExpExecTestStub);

  // The stub returns RegExpExecTestResultFailed to request the VM fallback.
  masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
                ool->entry());

  masm.bind(ool->rejoin());

  masm.freeStack(RegExpReservedStack);
}
   3610 
   3611 void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
   3612  Register regexp = ToRegister(ins->regexp());
   3613  Register input = ToRegister(ins->input());
   3614  Register output = ToRegister(ins->output());
   3615 
   3616  using Fn =
   3617      bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
   3618  auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
   3619      ins, ArgList(regexp, input), StoreRegisterTo(output));
   3620 
   3621  // Load RegExpShared in |output|.
   3622  Label vmCall;
   3623  masm.loadParsedRegExpShared(regexp, output, ool->entry());
   3624 
   3625  // Return true iff pairCount > 1.
   3626  Label returnTrue;
   3627  masm.branch32(Assembler::Above,
   3628                Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
   3629                &returnTrue);
   3630  masm.move32(Imm32(0), output);
   3631  masm.jump(ool->rejoin());
   3632 
   3633  masm.bind(&returnTrue);
   3634  masm.move32(Imm32(1), output);
   3635 
   3636  masm.bind(ool->rejoin());
   3637 }
   3638 
   3639 static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
   3640                                 Register len, Register temp0, Register temp1,
   3641                                 Register output, CharEncoding encoding) {
   3642 #ifdef DEBUG
   3643  Label ok;
   3644  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
   3645  masm.assumeUnreachable("Length should be greater than 0.");
   3646  masm.bind(&ok);
   3647 #endif
   3648 
   3649  Register chars = temp0;
   3650  masm.loadStringChars(str, chars, encoding);
   3651 
   3652  masm.move32(Imm32(0), output);
   3653 
   3654  Label start, done;
   3655  masm.bind(&start);
   3656 
   3657  Register currentChar = temp1;
   3658  masm.loadChar(chars, output, currentChar, encoding);
   3659  masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);
   3660 
   3661  masm.add32(Imm32(1), output);
   3662  masm.branch32(Assembler::NotEqual, output, len, &start);
   3663 
   3664  masm.move32(Imm32(-1), output);
   3665 
   3666  masm.bind(&done);
   3667 }
   3668 
   3669 void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
   3670  Register str = ToRegister(ins->str());
   3671  Register output = ToRegister(ins->output());
   3672  Register temp0 = ToRegister(ins->temp0());
   3673  Register temp1 = ToRegister(ins->temp1());
   3674  Register len = ToRegister(ins->temp2());
   3675 
   3676  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
   3677  OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
   3678      ins, ArgList(str), StoreRegisterTo(output));
   3679 
   3680  masm.branchIfRope(str, ool->entry());
   3681  masm.loadStringLength(str, len);
   3682 
   3683  Label isLatin1, done;
   3684  masm.branchLatin1String(str, &isLatin1);
   3685  {
   3686    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
   3687                         CharEncoding::TwoByte);
   3688    masm.jump(&done);
   3689  }
   3690  masm.bind(&isLatin1);
   3691  {
   3692    FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
   3693                         CharEncoding::Latin1);
   3694  }
   3695  masm.bind(&done);
   3696  masm.bind(ool->rejoin());
   3697 }
   3698 
   3699 void CodeGenerator::visitStringReplace(LStringReplace* lir) {
   3700  if (lir->replacement()->isConstant()) {
   3701    pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
   3702  } else {
   3703    pushArg(ToRegister(lir->replacement()));
   3704  }
   3705 
   3706  if (lir->pattern()->isConstant()) {
   3707    pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
   3708  } else {
   3709    pushArg(ToRegister(lir->pattern()));
   3710  }
   3711 
   3712  if (lir->string()->isConstant()) {
   3713    pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
   3714  } else {
   3715    pushArg(ToRegister(lir->string()));
   3716  }
   3717 
   3718  using Fn =
   3719      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
   3720  if (lir->mir()->isFlatReplacement()) {
   3721    callVM<Fn, StringFlatReplaceString>(lir);
   3722  } else {
   3723    callVM<Fn, StringReplace>(lir);
   3724  }
   3725 }
   3726 
   3727 void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
   3728  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
   3729  TypedOrValueRegister lhs = TypedOrValueRegister(ToValue(lir->lhs()));
   3730  TypedOrValueRegister rhs = TypedOrValueRegister(ToValue(lir->rhs()));
   3731  ValueOperand output = ToOutValue(lir);
   3732 
   3733  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
   3734 
   3735  switch (jsop) {
   3736    case JSOp::Add:
   3737    case JSOp::Sub:
   3738    case JSOp::Mul:
   3739    case JSOp::Div:
   3740    case JSOp::Mod:
   3741    case JSOp::Pow:
   3742    case JSOp::BitAnd:
   3743    case JSOp::BitOr:
   3744    case JSOp::BitXor:
   3745    case JSOp::Lsh:
   3746    case JSOp::Rsh:
   3747    case JSOp::Ursh: {
   3748      IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
   3749      addIC(lir, allocateIC(ic));
   3750      return;
   3751    }
   3752    default:
   3753      MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
   3754  }
   3755 }
   3756 
   3757 void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
   3758  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
   3759  TypedOrValueRegister lhs = TypedOrValueRegister(ToValue(lir->lhs()));
   3760  TypedOrValueRegister rhs = TypedOrValueRegister(ToValue(lir->rhs()));
   3761  Register output = ToRegister(lir->output());
   3762 
   3763  JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
   3764 
   3765  switch (jsop) {
   3766    case JSOp::Lt:
   3767    case JSOp::Le:
   3768    case JSOp::Gt:
   3769    case JSOp::Ge:
   3770    case JSOp::Eq:
   3771    case JSOp::Ne:
   3772    case JSOp::StrictEq:
   3773    case JSOp::StrictNe: {
   3774      IonCompareIC ic(liveRegs, lhs, rhs, output);
   3775      addIC(lir, allocateIC(ic));
   3776      return;
   3777    }
   3778    default:
   3779      MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
   3780  }
   3781 }
   3782 
   3783 void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
   3784  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
   3785  TypedOrValueRegister input = TypedOrValueRegister(ToValue(lir->input()));
   3786  ValueOperand output = ToOutValue(lir);
   3787 
   3788  IonUnaryArithIC ic(liveRegs, input, output);
   3789  addIC(lir, allocateIC(ic));
   3790 }
   3791 
   3792 void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
   3793  pushArg(ImmPtr(lir->mir()->module()));
   3794 
   3795  using Fn = JSObject* (*)(JSContext*, HandleObject);
   3796  callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
   3797 }
   3798 
   3799 void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
   3800  pushArg(ToValue(lir->options()));
   3801  pushArg(ToValue(lir->specifier()));
   3802  pushArg(ImmGCPtr(current->mir()->info().script()));
   3803 
   3804  using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
   3805  callVM<Fn, js::StartDynamicModuleImport>(lir);
   3806 }
   3807 
void CodeGenerator::visitLambda(LLambda* lir) {
  // Clone a function object from its template, then store the environment
  // chain into its environment slot. Falls back to a VM call on allocation
  // failure.
  Register envChain = ToRegister(lir->environmentChain());
  Register output = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());
  gc::Heap heap = lir->mir()->initialHeap();

  JSFunction* fun = lir->mir()->templateFunction();
  MOZ_ASSERT(fun->isTenured());

  using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject, gc::Heap);
  OutOfLineCode* ool = oolCallVM<Fn, js::LambdaOptimizedFallback>(
      lir, ArgList(ImmGCPtr(fun), envChain, Imm32(uint32_t(heap))),
      StoreRegisterTo(output));

  // Inline allocation path: create the clone from the template function.
  TemplateObject templateObject(fun);
  masm.createGCObject(output, tempReg, templateObject, heap, ool->entry(),
                      /* initContents = */ true,
                      AllocSiteInput(gc::CatchAllAllocSite::Optimized));

  masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
                  Address(output, JSFunction::offsetOfEnvironment()));

  // If we specified the tenured heap then we need a post barrier. Otherwise no
  // post barrier needed as the output is guaranteed to be allocated in the
  // nursery.
  if (heap == gc::Heap::Tenured) {
    // Only tenured -> nursery edges need the store buffer entry; skip the
    // barrier when the environment is not in the nursery.
    Label skipBarrier;
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, envChain, tempReg,
                                 &skipBarrier);
    saveVolatile(tempReg);
    emitPostWriteBarrier(output);
    restoreVolatile(tempReg);
    masm.bind(&skipBarrier);
  }

  masm.bind(ool->rejoin());
}
   3845 
   3846 void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
   3847  Register envChain = ToRegister(lir->envChain());
   3848  Register prototype = ToRegister(lir->prototype());
   3849 
   3850  pushArg(prototype);
   3851  pushArg(envChain);
   3852  pushArg(ImmGCPtr(lir->mir()->function()));
   3853 
   3854  using Fn =
   3855      JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
   3856  callVM<Fn, js::FunWithProtoOperation>(lir);
   3857 }
   3858 
   3859 void CodeGenerator::visitSetFunName(LSetFunName* lir) {
   3860  pushArg(Imm32(lir->mir()->prefixKind()));
   3861  pushArg(ToValue(lir->name()));
   3862  pushArg(ToRegister(lir->fun()));
   3863 
   3864  using Fn =
   3865      bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
   3866  callVM<Fn, js::SetFunctionName>(lir);
   3867 }
   3868 
void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
  // Record the on-stack-invalidation (OSI) return point for the preceding
  // instruction and link it to that instruction's safepoint.

  // Note: markOsiPoint ensures enough space exists between the last
  // LOsiPoint and this one to patch adjacent call instructions.

  MOZ_ASSERT(masm.framePushed() == frameSize());

  uint32_t osiCallPointOffset = markOsiPoint(lir);

  LSafepoint* safepoint = lir->associatedSafepoint();
  MOZ_ASSERT(!safepoint->osiCallPointOffset());
  safepoint->setOsiCallPointOffset(osiCallPointOffset);

#ifdef DEBUG
  // There should be no movegroups or other instructions between
  // an instruction and its OsiPoint. This is necessary because
  // we use the OsiPoint's snapshot from within VM calls.
  for (LInstructionReverseIterator iter(current->rbegin(lir));
       iter != current->rend(); iter++) {
    if (*iter == lir) {
      continue;
    }
    MOZ_ASSERT(!iter->isMoveGroup());
    MOZ_ASSERT(iter->safepoint() == safepoint);
    break;
  }
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
  if (shouldVerifyOsiPointRegs(safepoint)) {
    verifyOsiPointRegs(safepoint);
  }
#endif
}
   3902 
   3903 void CodeGenerator::visitPhi(LPhi* lir) {
   3904  MOZ_CRASH("Unexpected LPhi in CodeGenerator");
   3905 }
   3906 
void CodeGenerator::visitGoto(LGoto* lir) {
  // It would be valid to do simply `jumpToBlock(lir->target()); return;`.
  // That shorts out chains of completely empty (apart from the final Goto)
  // blocks.  However, we try to do a bit better by shorting out chains of
  // blocks which are either completely empty or contain only MoveGroups, by
  // emitting the MoveGroups at this point.  Hence this is a very limited form
  // of tail duplication, in which the duplicated tail(s) consist entirely of
  // MoveGroups.
  //
  // Ideally this logic should be in CodeGeneratorShared::jumpToBlock as it
  // would cover more use cases.  That unfortunately creates a circular
  // dependency between the classes CodeGeneratorShared, CodeGenerator{Arch}
  // and CodeGenerator, which is not easy to resolve; specifically,
  // CodeGeneratorShared would need to call CodeGenerator::visitMoveGroup, but
  // CodeGenerator is (indirectly) a child class of CodeGeneratorShared.
  //
  // See CodeGeneratorShared::jumpToBlock(MBasicBlock*) as reference.
  uint32_t numMoveGroupsCloned = 0;
  MBasicBlock* target = lir->target();
  while (true) {
    LBlock* targetLBlock = target->lir();
    // isMoveGroupsThenGoto returns the Goto's successor when the block is
    // just MoveGroups + Goto, and nullptr otherwise.
    LBlock* nextLBlock = targetLBlock->isMoveGroupsThenGoto();
    if (!nextLBlock) {
      break;
    }
    // This block is merely zero-or-more MoveGroups followed by a Goto.  Emit
    // the MoveGroups and keep following the chain.
    auto iter = targetLBlock->begin();
    while (true) {
      LInstruction* ins = *iter;
      if (!ins->isMoveGroup()) {
        break;
      }
      visitMoveGroup(ins->toMoveGroup());
      iter++;
      numMoveGroupsCloned++;
    }
    // Ensured by LBlock::isMoveGroupsThenGoto
    MOZ_ASSERT((*iter)->isGoto());
    MOZ_ASSERT((*iter)->toGoto()->getSuccessor(0)->lir() == nextLBlock);
    iter++;
    MOZ_RELEASE_ASSERT(iter == targetLBlock->end());
    target = nextLBlock->mir();
    if (numMoveGroupsCloned >= 1) {
      // Be very conservative about cloning.  Higher numbers give more
      // aggressive chasing but seem to sometimes cause a slight cycle count
      // regression.  In practice, cloning one happens occasionally, cloning of
      // two groups happens very rarely, and cloning of more than 2 groups has
      // only been seen in artificially constructed test cases.
      break;
    }
  }

  // If the above loop exited due to hitting the MoveGroup clone limit, we
  // still need to skip past any "trivial" blocks, to avoid asserting in
  // `target->lir()->label()` below.
  target = skipTrivialBlocks(target);

  // No jump necessary if we can fall through to the next block.
  if (isNextBlock(target->lir())) {
    return;
  }

  masm.jump(target->lir()->label());
}
   3972 
   3973 void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
   3974  MTableSwitch* mir = ins->mir();
   3975  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
   3976 
   3977  Register intIndex;
   3978  if (mir->getOperand(0)->type() != MIRType::Int32) {
   3979    intIndex = ToRegister(ins->temp0());
   3980 
   3981    // The input is a double, so try and convert it to an integer.
   3982    // If it does not fit in an integer, take the default case.
   3983    masm.convertDoubleToInt32(ToFloatRegister(ins->index()), intIndex,
   3984                              defaultcase, false);
   3985  } else {
   3986    intIndex = ToRegister(ins->index());
   3987  }
   3988 
   3989  emitTableSwitchDispatch(mir, intIndex, ToTempRegisterOrInvalid(ins->temp1()));
   3990 }
   3991 
void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
  // Table switch on a boxed Value: unbox an int32 directly, convert a double
  // to int32, and send every non-number (or inexact double) to the default
  // case.
  MTableSwitch* mir = ins->mir();
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  Register index = ToRegister(ins->temp0());
  ValueOperand value = ToValue(ins->input());
  Register tag = masm.extractTag(value, index);
  masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);

  Label unboxInt, isInt;
  masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
  {
    // Double path: bail to the default case unless the value converts to an
    // exact int32.
    FloatRegister floatIndex = ToFloatRegister(ins->temp1());
    masm.unboxDouble(value, floatIndex);
    masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
    masm.jump(&isInt);
  }

  masm.bind(&unboxInt);
  masm.unboxInt32(value, index);

  masm.bind(&isInt);

  emitTableSwitchDispatch(mir, index, ToTempRegisterOrInvalid(ins->temp2()));
}
   4017 
// Parameters need no code: they are already in place in the frame layout.
void CodeGenerator::visitParameter(LParameter* lir) {}
   4019 
   4020 void CodeGenerator::visitCallee(LCallee* lir) {
   4021  Register callee = ToRegister(lir->output());
   4022  Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());
   4023 
   4024  masm.loadFunctionFromCalleeToken(ptr, callee);
   4025 }
   4026 
void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
  // Produce 1 if the current frame was entered as a constructor call,
  // 0 otherwise, by masking the callee-token tag bit.
  Register output = ToRegister(lir->output());
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.loadPtr(calleeToken, output);

  // We must be inside a function.
  MOZ_ASSERT(current->mir()->info().script()->function());

  // The low bit indicates whether this call is constructing, just clear the
  // other bits.
  static_assert(CalleeToken_Function == 0x0,
                "CalleeTokenTag value should match");
  static_assert(CalleeToken_FunctionConstructing == 0x1,
                "CalleeTokenTag value should match");
  masm.andPtr(Imm32(0x1), output);
}
   4043 
void CodeGenerator::visitReturn(LReturn* lir) {
  // The return value must already sit in the JS return register(s); we only
  // verify that and, when needed, jump to the shared epilogue.
#if defined(JS_NUNBOX32)
  DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
  DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
  MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
  MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
  DebugOnly<LAllocation*> result = lir->getOperand(0);
  MOZ_ASSERT(ToRegister(result) == JSReturnReg);
#endif
  // Don't emit a jump to the return label if this is the last block, as
  // it'll fall through to the epilogue.
  //
  // This is -not- true however for a Generator-return, which may appear in the
  // middle of the last block, so we should always emit the jump there.
  if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
    masm.jump(&returnLabel_);
  }
}
   4063 
void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
  // Prologue for on-stack-replacement entry: record the entry offset and
  // build the Ion frame on top of the state Baseline left behind.
  Register temp = ToRegister(lir->temp());

  // Remember the OSR entry offset into the code buffer.
  masm.flushBuffer();
  setOsrEntryOffset(masm.size());

  // Allocate the full frame for this function
  // Note we have a new entry here. So we reset MacroAssembler::framePushed()
  // to 0, before reserving the stack.
  MOZ_ASSERT(masm.framePushed() == frameSize());
  masm.setFramePushed(0);

  // The Baseline code ensured both the frame pointer and stack pointer point to
  // the JitFrameLayout on the stack.

  // If profiling, save the current frame pointer to a per-thread global field.
  if (isProfilerInstrumentationEnabled()) {
    masm.profilerEnterFrame(FramePointer, temp);
  }

  masm.reserveStack(frameSize());
  MOZ_ASSERT(masm.framePushed() == frameSize());

  // Ensure that the Ion frame is properly aligned.
  masm.assertStackAlignment(JitStackAlignment, 0);
}
   4091 
   4092 void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
   4093  const LAllocation* frame = lir->entry();
   4094  const LDefinition* object = lir->output();
   4095 
   4096  const ptrdiff_t frameOffset =
   4097      BaselineFrame::reverseOffsetOfEnvironmentChain();
   4098 
   4099  masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
   4100 }
   4101 
   4102 void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
   4103  const LAllocation* frame = lir->entry();
   4104  const LDefinition* object = lir->output();
   4105 
   4106  const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
   4107 
   4108  masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
   4109 }
   4110 
   4111 void CodeGenerator::visitOsrValue(LOsrValue* value) {
   4112  const LAllocation* frame = value->entry();
   4113  const ValueOperand out = ToOutValue(value);
   4114 
   4115  const ptrdiff_t frameOffset = value->mir()->frameOffset();
   4116 
   4117  masm.loadValue(Address(ToRegister(frame), frameOffset), out);
   4118 }
   4119 
void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
  // Fetch the BaselineFrame's stored return value if the HAS_RVAL flag is
  // set; otherwise produce undefined.
  const LAllocation* frame = lir->entry();
  const ValueOperand out = ToOutValue(lir);

  Address flags =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
  Address retval =
      Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());

  // Default result when no return value was recorded.
  masm.moveValue(UndefinedValue(), out);

  Label done;
  masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
                    &done);
  masm.loadValue(retval, out);
  masm.bind(&done);
}
   4137 
void CodeGenerator::visitStackArgT(LStackArgT* lir) {
  // Store a typed argument into its outgoing-argument stack slot, boxing it
  // according to where the value currently lives.
  const LAllocation* arg = lir->arg();
  MIRType argType = lir->type();
  uint32_t argslot = lir->argslot();
  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

  Address dest = AddressOfPassedArg(argslot);

  if (arg->isFloatReg()) {
    // Doubles are boxed directly from the FP register.
    masm.boxDouble(ToFloatRegister(arg), dest);
  } else if (arg->isGeneralReg()) {
    // Tag the GPR payload with the JSValue type derived from the MIR type.
    masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
  } else {
    // Constant argument: store the fully-boxed Value.
    masm.storeValue(arg->toConstant()->toJSValue(), dest);
  }
}
   4154 
   4155 void CodeGenerator::visitStackArgV(LStackArgV* lir) {
   4156  ValueOperand val = ToValue(lir->value());
   4157  uint32_t argslot = lir->argslot();
   4158  MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
   4159 
   4160  masm.storeValue(val, AddressOfPassedArg(argslot));
   4161 }
   4162 
void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
  // Emit a group of parallel moves produced by the register allocator.
  // The MoveResolver orders them so cycles and overlaps are handled safely.
  if (!group->numMoves()) {
    return;
  }

  MoveResolver& resolver = masm.moveResolver();

  for (size_t i = 0; i < group->numMoves(); i++) {
    const LMove& move = group->getMove(i);

    LAllocation from = move.from();
    LAllocation to = move.to();
    LDefinition::Type type = move.type();

    // No bogus moves.
    MOZ_ASSERT(from != to);
    MOZ_ASSERT(!from.isConstant());
    // Map the LIR storage type to the MoveOp category the emitter uses to
    // pick load/store instructions.
    MoveOp::Type moveType;
    switch (type) {
      case LDefinition::OBJECT:
      case LDefinition::SLOTS:
      case LDefinition::WASM_ANYREF:
      case LDefinition::WASM_STRUCT_DATA:
      case LDefinition::WASM_ARRAY_DATA:
#ifdef JS_NUNBOX32
      case LDefinition::TYPE:
      case LDefinition::PAYLOAD:
#else
      case LDefinition::BOX:
#endif
      case LDefinition::GENERAL:
      case LDefinition::STACKRESULTS:
        moveType = MoveOp::GENERAL;
        break;
      case LDefinition::INT32:
        moveType = MoveOp::INT32;
        break;
      case LDefinition::FLOAT32:
        moveType = MoveOp::FLOAT32;
        break;
      case LDefinition::DOUBLE:
        moveType = MoveOp::DOUBLE;
        break;
      case LDefinition::SIMD128:
        moveType = MoveOp::SIMD128;
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }

    masm.propagateOOM(
        resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
  }

  masm.propagateOOM(resolver.resolve());
  if (masm.oom()) {
    return;
  }

  MoveEmitter emitter(masm);

#ifdef JS_CODEGEN_X86
  // x86 is register-starved: use an explicit scratch register when the
  // allocator provided one, otherwise order memory-to-memory moves so the
  // emitter can cope without a scratch.
  if (group->maybeScratchRegister().isGeneralReg()) {
    emitter.setScratchRegister(
        group->maybeScratchRegister().toGeneralReg()->reg());
  } else {
    resolver.sortMemoryToMemoryMoves();
  }
#endif

  emitter.emit(resolver);
  emitter.finish();
}
   4236 
   4237 void CodeGenerator::visitInteger(LInteger* lir) {
   4238  masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
   4239 }
   4240 
   4241 void CodeGenerator::visitInteger64(LInteger64* lir) {
   4242  masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
   4243 }
   4244 
   4245 void CodeGenerator::visitPointer(LPointer* lir) {
   4246  masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
   4247 }
   4248 
   4249 void CodeGenerator::visitDouble(LDouble* ins) {
   4250  masm.loadConstantDouble(ins->value(), ToFloatRegister(ins->output()));
   4251 }
   4252 
   4253 void CodeGenerator::visitFloat32(LFloat32* ins) {
   4254  masm.loadConstantFloat32(ins->value(), ToFloatRegister(ins->output()));
   4255 }
   4256 
   4257 void CodeGenerator::visitValue(LValue* value) {
   4258  ValueOperand result = ToOutValue(value);
   4259  masm.moveValue(value->value(), result);
   4260 }
   4261 
void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
  // Load a nursery-allocated object indirectly: emit a patchable move whose
  // immediate is fixed up at link time to point into the IonScript's nursery
  // objects list, then dereference it.
  Register output = ToRegister(lir->output());
  uint32_t nurseryIndex = lir->mir()->nurseryObjectIndex();

  // Load a pointer to the entry in IonScript's nursery objects list.
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
  masm.propagateOOM(nurseryObjectLabels_.emplaceBack(label, nurseryIndex));

  // Load the JSObject*.
  masm.loadPtr(Address(output, 0), output);
}
   4273 
void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
  // No-op. The instruction exists only so its operand's live range covers
  // this point; no machine code is emitted.
}
   4277 
void CodeGenerator::visitDebugEnterGCUnsafeRegion(
    LDebugEnterGCUnsafeRegion* lir) {
  // Debug-only bookkeeping: bump the JSContext's GC-unsafe-region counter
  // and assert it stayed positive (i.e. enter/leave calls are balanced).
  Register temp = ToRegister(lir->temp0());

  masm.loadJSContext(temp);

  Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
  masm.add32(Imm32(1), inUnsafeRegion);

  Label ok;
  masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
  masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
  masm.bind(&ok);
}
   4292 
void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
    LDebugLeaveGCUnsafeRegion* lir) {
  // Debug-only bookkeeping: decrement the JSContext's GC-unsafe-region
  // counter and assert it never goes negative (balanced enter/leave).
  Register temp = ToRegister(lir->temp0());

  masm.loadJSContext(temp);

  Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
  masm.add32(Imm32(-1), inUnsafeRegion);

  Label ok;
  masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
  masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
  masm.bind(&ok);
}
   4307 
   4308 void CodeGenerator::visitSlots(LSlots* lir) {
   4309  Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
   4310  masm.loadPtr(slots, ToRegister(lir->output()));
   4311 }
   4312 
   4313 void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
   4314  ValueOperand dest = ToOutValue(lir);
   4315  Register base = ToRegister(lir->input());
   4316  int32_t offset = lir->mir()->slot() * sizeof(js::Value);
   4317 
   4318  masm.loadValue(Address(base, offset), dest);
   4319 }
   4320 
   4321 void CodeGenerator::visitLoadDynamicSlotFromOffset(
   4322    LLoadDynamicSlotFromOffset* lir) {
   4323  ValueOperand dest = ToOutValue(lir);
   4324  Register slots = ToRegister(lir->slots());
   4325  Register offset = ToRegister(lir->offset());
   4326 
   4327  // slots[offset]
   4328  masm.loadValue(BaseIndex(slots, offset, TimesOne), dest);
   4329 }
   4330 
   4331 static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
   4332                                               MIRType valueType) {
   4333  if (value->isConstant()) {
   4334    return ConstantOrRegister(value->toConstant()->toJSValue());
   4335  }
   4336  return TypedOrValueRegister(valueType, ToAnyRegister(value));
   4337 }
   4338 
void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
  // Store a typed (or constant) value into a statically-known dynamic slot,
  // emitting the incremental pre-barrier first when the MIR requires it.
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(js::Value);
  Address dest(base, offset);

  if (lir->mir()->needsBarrier()) {
    // The old slot contents may be a GC thing; barrier before overwriting.
    emitPreBarrier(dest);
  }

  MIRType valueType = lir->mir()->value()->type();
  ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
  masm.storeUnboxedValue(value, valueType, dest);
}
   4352 
void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
  // Store a boxed Value into a statically-known dynamic slot, emitting the
  // incremental pre-barrier first when the MIR requires it.
  Register base = ToRegister(lir->slots());
  int32_t offset = lir->mir()->slot() * sizeof(Value);

  ValueOperand value = ToValue(lir->value());

  if (lir->mir()->needsBarrier()) {
    // The old slot contents may be a GC thing; barrier before overwriting.
    emitPreBarrier(Address(base, offset));
  }

  masm.storeValue(value, Address(base, offset));
}
   4365 
void CodeGenerator::visitStoreDynamicSlotFromOffsetV(
    LStoreDynamicSlotFromOffsetV* lir) {
  // Store a boxed Value into slots[offset] where the byte offset is only
  // known at runtime.
  Register slots = ToRegister(lir->slots());
  Register offset = ToRegister(lir->offset());
  ValueOperand value = ToValue(lir->value());
  Register temp = ToRegister(lir->temp0());

  // Compute the destination address once so the barrier and the store agree.
  BaseIndex baseIndex(slots, offset, TimesOne);
  masm.computeEffectiveAddress(baseIndex, temp);

  Address address(temp, 0);

  // The old slot contents may be a GC thing; barrier before overwriting.
  emitPreBarrier(address);

  // obj->slots[offset]
  masm.storeValue(value, address);
}
   4383 
   4384 void CodeGenerator::visitStoreDynamicSlotFromOffsetT(
   4385    LStoreDynamicSlotFromOffsetT* lir) {
   4386  Register slots = ToRegister(lir->slots());
   4387  Register offset = ToRegister(lir->offset());
   4388  const LAllocation* value = lir->value();
   4389  MIRType valueType = lir->mir()->value()->type();
   4390  Register temp = ToRegister(lir->temp0());
   4391 
   4392  BaseIndex baseIndex(slots, offset, TimesOne);
   4393  masm.computeEffectiveAddress(baseIndex, temp);
   4394 
   4395  Address address(temp, 0);
   4396 
   4397  emitPreBarrier(address);
   4398 
   4399  // obj->slots[offset]
   4400  ConstantOrRegister nvalue =
   4401      value->isConstant()
   4402          ? ConstantOrRegister(value->toConstant()->toJSValue())
   4403          : TypedOrValueRegister(valueType, ToAnyRegister(value));
   4404  masm.storeConstantOrRegister(nvalue, address);
   4405 }
   4406 
   4407 void CodeGenerator::visitElements(LElements* lir) {
   4408  Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
   4409  masm.loadPtr(elements, ToRegister(lir->output()));
   4410 }
   4411 
   4412 void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
   4413  Address environment(ToRegister(lir->function()),
   4414                      JSFunction::offsetOfEnvironment());
   4415  masm.unboxObject(environment, ToRegister(lir->output()));
   4416 }
   4417 
void CodeGenerator::visitHomeObject(LHomeObject* lir) {
  // Load a method's [[HomeObject]] from its extended-function slot.
  Register func = ToRegister(lir->function());
  Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());

  masm.assertFunctionIsExtended(func);
#ifdef DEBUG
  // The slot must hold an object; anything else indicates a compiler bug.
  Label isObject;
  masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
  masm.assumeUnreachable("[[HomeObject]] must be Object");
  masm.bind(&isObject);
#endif

  masm.unboxObject(homeObject, ToRegister(lir->output()));
}
   4432 
void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
  // Produce the super base: the home object's prototype boxed as a Value,
  // or null when the prototype is null.
  Register homeObject = ToRegister(lir->homeObject());
  ValueOperand output = ToOutValue(lir);
  Register temp = output.scratchReg();

  masm.loadObjProto(homeObject, temp);

#ifdef DEBUG
  // We won't encounter a lazy proto, because the prototype is guaranteed to
  // either be a JSFunction or a PlainObject, and only proxy objects can have a
  // lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label proxyCheckDone;
  masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
  masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
  masm.bind(&proxyCheckDone);
#endif

  Label nullProto, done;
  masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);

  // Box prototype and return
  masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
  masm.jump(&done);

  masm.bind(&nullProto);
  masm.moveValue(NullValue(), output);

  masm.bind(&done);
}
   4464 
   4465 template <class T>
   4466 static T* ToConstantObject(MDefinition* def) {
   4467  MOZ_ASSERT(def->isConstant());
   4468  return &def->toConstant()->toObject().as<T>();
   4469 }
   4470 
void CodeGenerator::visitNewLexicalEnvironmentObject(
    LNewLexicalEnvironmentObject* lir) {
  // Allocate a BlockLexicalEnvironmentObject from its template object,
  // falling back to a VM call on allocation failure.
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
      lir->mir()->templateObj());
  auto* scope = &templateObj->scope();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn =
      BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
  auto* ool =
      oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
          lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  // Inline allocation path, cloning from the template object.
  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
   4492 
void CodeGenerator::visitNewClassBodyEnvironmentObject(
    LNewClassBodyEnvironmentObject* lir) {
  // Allocate a ClassBodyLexicalEnvironmentObject from its template object,
  // falling back to a VM call on allocation failure.
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
      lir->mir()->templateObj());
  auto* scope = &templateObj->scope();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
                                                    Handle<ClassBodyScope*>);
  auto* ool =
      oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
          lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  // Inline allocation path, cloning from the template object.
  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
   4514 
void CodeGenerator::visitNewVarEnvironmentObject(
    LNewVarEnvironmentObject* lir) {
  // Allocate a VarEnvironmentObject from its template object, falling back
  // to a VM call on allocation failure.
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  auto* templateObj =
      ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
  auto* scope = &templateObj->scope().as<VarScope>();
  gc::Heap initialHeap = gc::Heap::Default;

  using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
  auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
      lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));

  // Inline allocation path, cloning from the template object.
  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());

  masm.bind(ool->rejoin());
}
   4534 
   4535 void CodeGenerator::visitGuardShape(LGuardShape* guard) {
   4536  Register obj = ToRegister(guard->object());
   4537  Register temp = ToTempRegisterOrInvalid(guard->temp0());
   4538  Label bail;
   4539  masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
   4540                          obj, &bail);
   4541  bailoutFrom(&bail, guard->snapshot());
   4542 }
   4543 
void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
  // Bail out if the realm fuse's word is no longer zero (i.e. the fuse state
  // this code was compiled against has changed).
  auto fuseIndex = guard->mir()->fuseIndex();

  Label bail;

  // Bake specific fuse address for Ion code, because we won't share this code
  // across realms.
  GuardFuse* fuse = mirGen().realm->realmFuses().getFuseByIndex(fuseIndex);
  masm.branchPtr(Assembler::NotEqual, AbsoluteAddress(fuse->fuseRef()),
                 ImmWord(0), &bail);

  bailoutFrom(&bail, guard->snapshot());
}
   4557 
void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
  // Bail out unless the object's shape appears in the shape list (stored in
  // the elements of |shapeList|). The optional |spectre| register mitigates
  // speculative type confusion.
  Register obj = ToRegister(guard->object());
  Register shapeList = ToRegister(guard->shapeList());
  Register temp = ToRegister(guard->temp0());
  Register temp2 = ToRegister(guard->temp1());
  Register temp3 = ToRegister(guard->temp2());
  Register spectre = ToTempRegisterOrInvalid(guard->temp3());

  Label bail;
  masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
  masm.branchTestObjShapeList(obj, temp, temp2, temp3, spectre, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   4571 
void CodeGenerator::visitGuardShapeList(LGuardShapeList* guard) {
  // Bail out unless the object's shape matches one of the compile-time shape
  // list entries. Emits one compare-and-branch per non-null shape; the last
  // one is inverted so the fall-through is the success path.
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());
  Register spectre = ToTempRegisterOrInvalid(guard->temp1());

  Label done, bail;
  masm.loadObjShapeUnsafe(obj, temp);

  // Count the number of branches to emit.
  const auto& shapes = guard->mir()->shapeList()->shapes();
  size_t branchesLeft = std::count_if(shapes.begin(), shapes.end(),
                                      [](Shape* s) { return s != nullptr; });
  MOZ_RELEASE_ASSERT(branchesLeft > 0);

  for (Shape* shape : shapes) {
    if (!shape) {
      // Null entries are holes in the list; skip them.
      continue;
    }
    if (branchesLeft > 1) {
      masm.branchPtr(Assembler::Equal, temp, ImmGCPtr(shape), &done);
      if (spectre != InvalidReg) {
        // Under speculation, conditionally poison/restore the object pointer
        // to block type-confusion through mispredicted branches.
        masm.spectreMovePtr(Assembler::Equal, spectre, obj);
      }
    } else {
      // This is the last branch so invert the condition and jump to |bail|.
      masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(shape), &bail);
      if (spectre != InvalidReg) {
        masm.spectreMovePtr(Assembler::NotEqual, spectre, obj);
      }
    }
    branchesLeft--;
  }
  MOZ_ASSERT(branchesLeft == 0);

  masm.bind(&done);
  bailoutFrom(&bail, guard->snapshot());
}
   4609 
// Like visitGuardShapeList, but on a shape match also loads the matching
// entry of the MIR node's parallel offset list into the |offset| output.
void CodeGenerator::visitGuardShapeListToOffset(
    LGuardShapeListToOffset* guard) {
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());
  // InvalidReg unless a Spectre-mitigation temp was allocated.
  Register spectre = ToTempRegisterOrInvalid(guard->temp1());
  Register offset = ToRegister(guard->output());

  Label done, bail;
  masm.loadObjShapeUnsafe(obj, temp);

  // Count the number of branches to emit.
  const auto& shapes = guard->mir()->shapeList()->shapes();
  const auto& offsets = guard->mir()->shapeList()->offsets();
  size_t branchesLeft = std::count_if(shapes.begin(), shapes.end(),
                                      [](Shape* s) { return s != nullptr; });
  MOZ_RELEASE_ASSERT(branchesLeft > 0);

  // |index| tracks the position in the parallel shapes/offsets arrays,
  // including null shape entries that emit no code.
  size_t index = 0;
  for (Shape* shape : shapes) {
    if (!shape) {
      index++;
      continue;
    }

    if (branchesLeft > 1) {
      Label next;
      masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(shape), &next);
      if (spectre != InvalidReg) {
        // Spectre mitigation — see MacroAssembler::spectreMovePtr.
        masm.spectreMovePtr(Assembler::NotEqual, spectre, obj);
      }
      masm.move32(Imm32(offsets[index]), offset);
      masm.jump(&done);
      masm.bind(&next);
    } else {
      // Last entry: bail out directly on mismatch.
      masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(shape), &bail);
      if (spectre != InvalidReg) {
        masm.spectreMovePtr(Assembler::NotEqual, spectre, obj);
      }
      masm.move32(Imm32(offsets[index]), offset);
    }

    branchesLeft--;
    index++;
  }
  MOZ_ASSERT(branchesLeft == 0);

  masm.bind(&done);
  bailoutFrom(&bail, guard->snapshot());
}
   4659 
// Guard that |obj|'s shape is in the runtime shape list and set |offset| to
// the matching entry's offset. When Spectre object mitigations are enabled,
// the output register doubles as the Spectre scratch.
void CodeGenerator::visitGuardMultipleShapesToOffset(
    LGuardMultipleShapesToOffset* guard) {
  Register obj = ToRegister(guard->object());
  Register shapeList = ToRegister(guard->shapeList());
  Register temp = ToRegister(guard->temp0());
  Register temp1 = ToRegister(guard->temp1());
  Register temp2 = ToRegister(guard->temp2());
  Register offset = ToRegister(guard->output());
  Register spectre = JitOptions.spectreObjectMitigations ? offset : InvalidReg;

  Label bail;
  // Search the shape list's dense elements for the object's shape.
  masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
  masm.branchTestObjShapeListSetOffset(obj, temp, offset, temp1, temp2, spectre,
                                       &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   4676 
   4677 void CodeGenerator::visitGuardProto(LGuardProto* guard) {
   4678  Register obj = ToRegister(guard->object());
   4679  Register expected = ToRegister(guard->expected());
   4680  Register temp = ToRegister(guard->temp0());
   4681 
   4682  masm.loadObjProto(obj, temp);
   4683 
   4684  Label bail;
   4685  masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
   4686  bailoutFrom(&bail, guard->snapshot());
   4687 }
   4688 
   4689 void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
   4690  Register obj = ToRegister(guard->object());
   4691  Register temp = ToRegister(guard->temp0());
   4692 
   4693  masm.loadObjProto(obj, temp);
   4694 
   4695  Label bail;
   4696  masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
   4697  bailoutFrom(&bail, guard->snapshot());
   4698 }
   4699 
// Bail out if |obj| is not a native object.
void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchIfNonNativeObj(obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   4708 
// Bail out if the generation counter (read from an absolute address baked
// into the code) no longer matches the value the MIR node expects.
void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
  Register temp = ToRegister(guard->temp0());
  Label bail;

  masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
  masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
                &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   4718 
// Bail out unless |obj| is a proxy.
void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  // |false| selects the branch-if-not-a-proxy direction (contrast
  // visitGuardIsNotProxy below, which passes |true|).
  masm.branchTestObjectIsProxy(false, obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   4727 
// Bail out if |obj| is a proxy.
void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  // |true| selects the branch-if-proxy direction.
  masm.branchTestObjectIsProxy(true, obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   4736 
// Bail out if |proxy|'s handler belongs to the DOM proxy handler family,
// i.e. if the proxy is a DOM proxy.
void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
  Register proxy = ToRegister(guard->proxy());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
                                    GetDOMProxyHandlerFamily(), &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   4746 
// Proxy property get with a constant id, performed via a VM call.
void CodeGenerator::visitProxyGet(LProxyGet* lir) {
  Register proxy = ToRegister(lir->proxy());
  Register temp = ToRegister(lir->temp0());

  // VM-call arguments are pushed in reverse order of the Fn signature
  // (after the implicit JSContext).
  pushArg(lir->mir()->id(), temp);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
  callVM<Fn, ProxyGetProperty>(lir);
}
   4757 
// Proxy property get with a dynamic (Value) id, performed via a VM call.
void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir->idVal());

  // Arguments pushed in reverse order of the Fn signature.
  pushArg(idVal);
  pushArg(proxy);

  using Fn =
      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  callVM<Fn, ProxyGetPropertyByValue>(lir);
}
   4769 
// Proxy |in| / hasOwnProperty test via a VM call. The MIR's hasOwn() flag
// selects between the ProxyHasOwn and ProxyHas VM functions.
void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir->id());

  // Arguments pushed in reverse order of the Fn signature.
  pushArg(idVal);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  if (lir->mir()->hasOwn()) {
    callVM<Fn, ProxyHasOwn>(lir);
  } else {
    callVM<Fn, ProxyHas>(lir);
  }
}
   4784 
// Proxy property set with a constant id, performed via a VM call. The strict
// flag from the MIR node is passed through to the VM function.
void CodeGenerator::visitProxySet(LProxySet* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand rhs = ToValue(lir->rhs());
  Register temp = ToRegister(lir->temp0());

  // Arguments pushed in reverse order of the Fn signature.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(lir->mir()->id(), temp);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(lir);
}
   4798 
// Proxy property set with a dynamic (Value) id, performed via a VM call.
void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
  Register proxy = ToRegister(lir->proxy());
  ValueOperand idVal = ToValue(lir->idVal());
  ValueOperand rhs = ToValue(lir->rhs());

  // Arguments pushed in reverse order of the Fn signature.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(idVal);
  pushArg(proxy);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, ProxySetPropertyByValue>(lir);
}
   4812 
// Set an array's .length property via the jit::SetArrayLength VM call.
void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
  Register obj = ToRegister(lir->obj());
  ValueOperand rhs = ToValue(lir->rhs());

  // Arguments pushed in reverse order of the Fn signature.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(rhs);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(lir);
}
   4824 
// Megamorphic load of a property with a constant name. Fast path: hit in the
// megamorphic cache. Slow path: ABI call to GetNativeDataPropertyPure with
// the result written into a stack Value slot; bail out if the object is
// non-native or the pure lookup reports failure.
void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
  Register obj = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register temp3 = ToRegister(lir->temp3());
  ValueOperand output = ToOutValue(lir);

  Label cacheHit;
  masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
                                  output, &cacheHit);

  // The pure C++ lookup below only handles native objects.
  Label bail;
  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // Reserve a stack Value slot (|vp|) to receive the call's result.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(temp3);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.movePropertyKey(lir->mir()->name(), temp1);
  masm.passABIArg(temp1);
  // temp2 is the cacheEntry argument — presumably set up by the cache
  // lookup above; confirm against emitMegamorphicCacheLookup.
  masm.passABIArg(temp2);
  masm.passABIArg(temp3);

  masm.callWithABI<Fn, GetNativeDataPropertyPure>();

  MOZ_ASSERT(!output.aliases(ReturnReg));
  masm.Pop(output);

  // A false return means the pure lookup could not handle this case.
  masm.branchIfFalseBool(ReturnReg, &bail);
  masm.bind(&cacheHit);

  bailoutFrom(&bail, lir->snapshot());
}
   4864 
// Megamorphic load with a constant name that is also allowed to run getters
// ("permissive"). A cache hit jumps to |done|; a cached getter jumps to
// |getter| and is called directly from jitcode; otherwise we fall through
// into the GetPropMaybeCached VM call.
void CodeGenerator::visitMegamorphicLoadSlotPermissive(
    LMegamorphicLoadSlotPermissive* lir) {
  Register obj = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register temp3 = ToRegister(lir->temp3());
  ValueOperand output = ToOutValue(lir);

  // Preserve |obj| in temp3 for the getter call below.
  masm.movePtr(obj, temp3);

  Label done, getter, nullGetter;
  masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
                                  output, &done, &getter);

  // Cache miss: VM-call fallback (arguments pushed in reverse order).
  masm.movePropertyKey(lir->mir()->name(), temp1);
  pushArg(temp2);
  pushArg(temp1);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId,
                      MegamorphicCacheEntry*, MutableHandleValue);
  callVM<Fn, GetPropMaybeCached>(lir);

  masm.jump(&done);

  masm.bind(&getter);

  emitCallMegamorphicGetter(lir, output, temp3, temp1, temp2, &nullGetter);
  masm.jump(&done);

  // A null getter yields |undefined|.
  masm.bind(&nullGetter);
  masm.moveValue(UndefinedValue(), output);

  masm.bind(&done);
}
   4901 
// Megamorphic load keyed by a dynamic Value id. Fast path: megamorphic cache
// hit. Slow path: ABI call to the pure C++ by-value lookup, which uses a
// two-Value stack area (|vp|) for both the id and the result.
void CodeGenerator::visitMegamorphicLoadSlotByValue(
    LMegamorphicLoadSlotByValue* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir->idVal());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  ValueOperand output = ToOutValue(lir);

  Label cacheHit, bail;
  masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
                                         output, &cacheHit);

  // The pure C++ lookup below only handles native objects.
  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(temp0);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(obj);
  masm.passABIArg(temp2);
  masm.passABIArg(temp0);
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  MOZ_ASSERT(!idVal.aliases(temp0));
  masm.storeCallPointerResult(temp0);
  masm.Pop(idVal);

  // Record the tracked stack height so it can be restored after the
  // failure path's freeStack below changes the assembler's bookkeeping.
  uint32_t framePushed = masm.framePushed();
  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  masm.freeStack(sizeof(Value));  // Discard result Value.
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.Pop(output);

  masm.bind(&cacheHit);

  bailoutFrom(&bail, lir->snapshot());
}
   4950 
// Megamorphic by-value load that may also run getters. On non-x86 platforms
// a cached getter is invoked directly from jitcode; x86 lacks the registers
// for that, so it always falls back to the VM call on a cache miss.
void CodeGenerator::visitMegamorphicLoadSlotByValuePermissive(
    LMegamorphicLoadSlotByValuePermissive* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir->idVal());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  ValueOperand output = ToOutValue(lir);

  // If we have enough registers available, we can call getters directly from
  // jitcode. On x86, we have to call into the VM.
#ifndef JS_CODEGEN_X86
  Label done, getter, nullGetter;
  Register temp3 = ToRegister(lir->temp3());
  // Preserve |obj| in temp3 for the getter call below.
  masm.movePtr(obj, temp3);

  masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
                                         output, &done, &getter);
#else
  Label done;
  masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
                                         output, &done);
#endif

  // Cache miss: VM-call fallback (arguments pushed in reverse order).
  pushArg(temp2);
  pushArg(idVal);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue,
                      MegamorphicCacheEntry*, MutableHandleValue);
  callVM<Fn, GetElemMaybeCached>(lir);

#ifndef JS_CODEGEN_X86
  masm.jump(&done);
  masm.bind(&getter);

  emitCallMegamorphicGetter(lir, output, temp3, temp1, temp2, &nullGetter);
  masm.jump(&done);

  // A null getter yields |undefined|.
  masm.bind(&nullGetter);
  masm.moveValue(UndefinedValue(), output);
#endif

  masm.bind(&done);
}
   4996 
// Megamorphic store of a property with a constant name. Fast path: write
// through the megamorphic set-slot cache (emitting a pre-barrier on the
// slot); miss falls through to the SetPropertyMegamorphic VM call. After a
// cache hit, a post write-barrier is emitted when a nursery value is stored
// into a non-nursery object.
void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand value = ToValue(lir->rhs());

  Register temp0 = ToRegister(lir->temp0());
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  // The instruction is marked as call-instruction so only these registers are
  // live.
  LiveRegisterSet liveRegs;
  liveRegs.addUnchecked(obj);
  liveRegs.addUnchecked(value);
  liveRegs.addUnchecked(temp0);
#ifndef JS_CODEGEN_X86
  liveRegs.addUnchecked(temp1);
  liveRegs.addUnchecked(temp2);
#endif

  // x86 has fewer registers, so it uses the temp-less overload.
  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  masm.emitMegamorphicCachedSetSlot(
      lir->mir()->name(), obj, temp0, value, liveRegs, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      lir->mir()->name(), obj, temp0, temp1, temp2, value, liveRegs, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  // Cache miss: VM-call fallback (arguments pushed in reverse order).
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(value);
  pushArg(lir->mir()->name(), temp0);
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, SetPropertyMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Skip the post-barrier if the object is itself in the nursery or the
  // stored value is not a nursery cell.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  // Note: because this is a call-instruction, no registers need to be saved.
  MOZ_ASSERT(lir->isCall());
  emitPostWriteBarrier(obj);

  masm.bind(&done);
}
   5053 
// Megamorphic has/hasOwn property test keyed by a dynamic Value id. Fast
// path: megamorphic cache hit. Slow path: ABI call to the pure C++ test
// (HasNativeDataPropertyPure, specialized on hasOwn), using a two-Value
// stack area for the id and the boolean result.
void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir->idVal());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label bail, cacheHit;
  masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2, output,
                                        &cacheHit, lir->mir()->hasOwn());

  // The pure C++ test below only handles native objects.
  masm.branchIfNonNativeObj(obj, temp0, &bail);

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(temp0);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp1);
  masm.passABIArg(temp1);
  masm.passABIArg(obj);
  masm.passABIArg(temp2);
  masm.passABIArg(temp0);
  if (lir->mir()->hasOwn()) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }

  MOZ_ASSERT(!idVal.aliases(temp0));
  masm.storeCallPointerResult(temp0);
  masm.Pop(idVal);

  // Record the tracked stack height so it can be restored after the
  // failure path's freeStack below changes the assembler's bookkeeping.
  uint32_t framePushed = masm.framePushed();
  Label ok;
  masm.branchIfTrueBool(temp0, &ok);
  masm.freeStack(sizeof(Value));  // Discard result Value.
  masm.jump(&bail);

  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  // Unbox the boolean result from the reserved stack slot.
  masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(Value));
  masm.bind(&cacheHit);

  bailoutFrom(&bail, lir->snapshot());
}
   5105 
// Test whether the atom |id| is one of the property keys of a known shape,
// by emitting one pointer compare against each key's atom. Sets the boolean
// |output| accordingly.
void CodeGenerator::visitSmallObjectVariableKeyHasProp(
    LSmallObjectVariableKeyHasProp* lir) {
  Register id = ToRegister(lir->idStr());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // Atom pointers can be compared directly, so assert the input is an atom.
  Label isAtom;
  masm.branchTest32(Assembler::NonZero, Address(id, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &isAtom);
  masm.assumeUnreachable("Expected atom input");
  masm.bind(&isAtom);
#endif

  SharedShape* shape = &lir->mir()->shape()->asShared();

  Label done, success;
  for (SharedShapePropertyIter<NoGC> iter(shape); !iter.done(); iter++) {
    masm.branchPtr(Assembler::Equal, id, ImmGCPtr(iter->key().toAtom()),
                   &success);
  }
  // No key matched: result is false.
  masm.move32(Imm32(0), output);
  masm.jump(&done);
  masm.bind(&success);
  masm.move32(Imm32(1), output);
  masm.bind(&done);
}
   5132 
   5133 void CodeGenerator::visitGuardToArrayBuffer(LGuardToArrayBuffer* guard) {
   5134  Register obj = ToRegister(guard->object());
   5135  Register temp = ToRegister(guard->temp0());
   5136 
   5137  // branchIfIsNotArrayBuffer may zero the object register on speculative paths
   5138  // (we should have a defineReuseInput allocation in this case).
   5139 
   5140  Label bail;
   5141  masm.branchIfIsNotArrayBuffer(obj, temp, &bail);
   5142  bailoutFrom(&bail, guard->snapshot());
   5143 }
   5144 
// Bail out unless |obj| is a SharedArrayBuffer.
void CodeGenerator::visitGuardToSharedArrayBuffer(
    LGuardToSharedArrayBuffer* guard) {
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());

  // branchIfIsNotSharedArrayBuffer may zero the object register on speculative
  // paths (we should have a defineReuseInput allocation in this case).

  Label bail;
  masm.branchIfIsNotSharedArrayBuffer(obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   5157 
// Bail out if |obj| is an ArrayBuffer or SharedArrayBuffer.
void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
    LGuardIsNotArrayBufferMaybeShared* guard) {
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.branchIfIsArrayBufferMaybeShared(obj, temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   5167 
// Bail out unless |obj|'s class is a TypedArray class.
void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   5177 
// Bail out unless |obj|'s class is a non-resizable TypedArray class.
void CodeGenerator::visitGuardIsNonResizableTypedArray(
    LGuardIsNonResizableTypedArray* guard) {
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotNonResizableTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   5188 
// Bail out unless |obj|'s class is a resizable TypedArray class.
void CodeGenerator::visitGuardIsResizableTypedArray(
    LGuardIsResizableTypedArray* guard) {
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());

  Label bail;
  masm.loadObjClassUnsafe(obj, temp);
  masm.branchIfClassIsNotResizableTypedArray(temp, &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   5199 
// Bail out unless the proxy |obj|'s handler pointer equals the handler baked
// into the MIR node.
void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
  Register obj = ToRegister(guard->object());

  Label bail;

  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::NotEqual, handlerAddr,
                 ImmPtr(guard->mir()->handler()), &bail);

  bailoutFrom(&bail, guard->snapshot());
}
   5211 
   5212 void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
   5213  Register input = ToRegister(guard->input());
   5214  Register expected = ToRegister(guard->expected());
   5215 
   5216  Assembler::Condition cond =
   5217      guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
   5218  bailoutCmpPtr(cond, input, expected, guard->snapshot());
   5219 }
   5220 
   5221 void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
   5222  Register input = ToRegister(guard->input());
   5223  Register expected = ToRegister(guard->expected());
   5224 
   5225  bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
   5226 }
   5227 
// Bail out unless the string |str| equals the atom baked into the MIR node.
// guardSpecificAtom may call out to C++ — presumably to atomize a non-atom
// input — hence the volatile register set; confirm against MacroAssembler.
void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
  Register str = ToRegister(guard->str());
  Register scratch = ToRegister(guard->temp0());

  LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
  volatileRegs.takeUnchecked(scratch);

  Label bail;
  masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
                         &bail);
  bailoutFrom(&bail, guard->snapshot());
}
   5240 
   5241 void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
   5242  Register symbol = ToRegister(guard->symbol());
   5243 
   5244  bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
   5245                guard->snapshot());
   5246 }
   5247 
   5248 void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
   5249  Register num = ToRegister(guard->num());
   5250 
   5251  bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
   5252               guard->snapshot());
   5253 }
   5254 
// Convert a string to an array index. Fast path reads the string's cached
// index value; otherwise an ABI call to GetIndexFromString is made, which
// signals failure with a negative result (triggering a bailout).
void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
  Register str = ToRegister(lir->string());
  Register output = ToRegister(lir->output());

  Label vmCall, done;
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = int32_t (*)(JSString* str);
    masm.setupAlignedABICall();
    masm.passABIArg(str);
    masm.callWithABI<Fn, GetIndexFromString>();
    masm.storeCallInt32Result(output);

    masm.PopRegsInMask(volatileRegs);

    // GetIndexFromString returns a negative value on failure.
    bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
  }

  masm.bind(&done);
}
   5284 
// Convert a string to an int32, bailing out if the conversion fails.
void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
  Register str = ToRegister(lir->string());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  // guardStringToInt32 may call into C++, so the live volatile registers
  // must be preserved around it.
  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);

  Label bail;
  masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
   5296 
// Convert a string to a double. Fast path converts the string's cached index
// value; otherwise StringToNumberPure is called via the ABI with the result
// written into a reserved stack slot. Bails out if that call fails (OOM).
void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
  Register str = ToRegister(lir->string());
  FloatRegister output = ToFloatRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label vmCall, done;
  // Use indexed value as fast path if possible.
  masm.loadStringIndexValue(str, temp0, &vmCall);
  masm.convertInt32ToDouble(temp0, output);
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    // Reserve stack for holding the result value of the call.
    masm.reserveStack(sizeof(double));
    masm.moveStackPtrTo(temp0);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp0);
    volatileRegs.takeUnchecked(temp1);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
    masm.setupAlignedABICall();
    masm.loadJSContext(temp1);
    masm.passABIArg(temp1);
    masm.passABIArg(str);
    masm.passABIArg(temp0);
    masm.callWithABI<Fn, StringToNumberPure>();
    masm.storeCallPointerResult(temp0);

    masm.PopRegsInMask(volatileRegs);

    Label ok;
    masm.branchIfTrueBool(temp0, &ok);
    {
      // OOM path, recovered by StringToNumberPure.
      //
      // Use addToStackPtr instead of freeStack as freeStack tracks stack height
      // flow-insensitively, and using it here would confuse the stack height
      // tracking.
      masm.addToStackPtr(Imm32(sizeof(double)));
      bailout(lir->snapshot());
    }
    masm.bind(&ok);
    masm.Pop(output);
  }
  masm.bind(&done);
}
   5347 
// Bail out if the object has any initialized dense elements.
void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
  Register obj = ToRegister(guard->object());
  Register temp = ToRegister(guard->temp0());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

  // Make sure there are no dense elements.
  Address initLength(temp, ObjectElements::offsetOfInitializedLength());
  bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
}
   5359 
   5360 void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
   5361  Register input = ToRegister(lir->input());
   5362  Register64 output = ToOutRegister64(lir);
   5363 
   5364  masm.move32To64ZeroExtend(input, output);
   5365 }
   5366 
// Shared helper: convert the string in |input| to an int64 in |output| by
// calling the DoStringToInt64 VM function, which writes its result into a
// reserved stack slot. Live registers are saved/restored around the call so
// this can also be used mid-instruction (see visitValueToInt64).
void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
                                      Register64 output) {
  Register temp = output.scratchReg();

  saveLive(lir);

  // Reserve an 8-byte slot to receive the uint64_t result.
  masm.reserveStack(sizeof(uint64_t));
  masm.moveStackPtrTo(temp);
  pushArg(temp);
  pushArg(input);

  using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
  callVM<Fn, DoStringToInt64>(lir);

  masm.load64(Address(masm.getStackPointer(), 0), output);
  masm.freeStack(sizeof(uint64_t));

  // Restore live registers, but don't clobber the just-produced output.
  restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
}
   5386 
   5387 void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
   5388  Register input = ToRegister(lir->input());
   5389  Register64 output = ToOutRegister64(lir);
   5390 
   5391  emitStringToInt64(lir, input, output);
   5392 }
   5393 
// Convert a boxed Value to an int64. Handles three type cases in order —
// BigInt (via loadBigInt64), Boolean (zero-extend), and String (VM call via
// emitStringToInt64) — and bails out for any other type. The |checks|
// counter makes the last test branch straight to |fail| on mismatch.
void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
  ValueOperand input = ToValue(lir->input());
  Register temp = ToRegister(lir->temp0());
  Register64 output = ToOutRegister64(lir);

  int checks = 3;

  Label fail, done;
  // Jump to fail if this is the last check and we fail it,
  // otherwise to the next test.
  auto emitTestAndUnbox = [&](auto testAndUnbox) {
    MOZ_ASSERT(checks > 0);

    checks--;
    Label notType;
    Label* target = checks ? &notType : &fail;

    testAndUnbox(target);

    // Non-final checks jump to |done| on success and fall through to the
    // next check's test on mismatch.
    if (checks) {
      masm.jump(&done);
      masm.bind(&notType);
    }
  };

  Register tag = masm.extractTag(input, temp);

  // BigInt.
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestBigInt(Assembler::NotEqual, tag, target);
    masm.unboxBigInt(input, temp);
    masm.loadBigInt64(temp, output);
  });

  // Boolean
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestBoolean(Assembler::NotEqual, tag, target);
    masm.unboxBoolean(input, temp);
    masm.move32To64ZeroExtend(temp, output);
  });

  // String
  emitTestAndUnbox([&](Label* target) {
    masm.branchTestString(Assembler::NotEqual, tag, target);
    masm.unboxString(input, temp);
    emitStringToInt64(lir, temp, output);
  });

  MOZ_ASSERT(checks == 0);

  bailoutFrom(&fail, lir->snapshot());
  masm.bind(&done);
}
   5447 
   5448 void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
   5449  Register operand = ToRegister(lir->input());
   5450  Register64 output = ToOutRegister64(lir);
   5451 
   5452  masm.loadBigInt64(operand, output);
   5453 }
   5454 
// Create the out-of-line VM-call path used when inline BigInt allocation
// fails. |type| selects between the signed (BigInt64) and unsigned
// (BigUint64) creation functions.
OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
                                                    Scalar::Type type,
                                                    Register64 input,
                                                    Register output) {
#if JS_BITS_PER_WORD == 32
  // On 32-bit platforms the 64-bit input is passed as two 32-bit halves.
  using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
  auto args = ArgList(input.low, input.high);
#else
  using Fn = BigInt* (*)(JSContext*, uint64_t);
  auto args = ArgList(input);
#endif

  if (type == Scalar::BigInt64) {
    return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
                                                     StoreRegisterTo(output));
  }
  MOZ_ASSERT(type == Scalar::BigUint64);
  return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
                                                    StoreRegisterTo(output));
}
   5475 
// Allocate a BigInt in jitcode and initialize it with the 64-bit |input|.
// Allocation failure falls back to the out-of-line VM call. If |maybeTemp|
// is InvalidReg, a scratch GPR is spilled to the stack around the
// allocation instead.
void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
                                     Register64 input, Register output,
                                     Register maybeTemp,
                                     Register64 maybeTemp64) {
  OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);

  if (maybeTemp != InvalidReg) {
    masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
  } else {
    // No temp register available: pick any GPR not aliasing input/output and
    // preserve its value across the allocation attempt.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(output);

    Register temp = regs.takeAny();

    masm.push(temp);

    // The temp must be restored on both paths before either continuing or
    // jumping to the OOL entry, hence the fail/ok label dance with two pops.
    Label fail, ok;
    masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
    masm.pop(temp);
    masm.jump(&ok);
    masm.bind(&fail);
    masm.pop(temp);
    masm.jump(ool->entry());
    masm.bind(&ok);
  }
  masm.initializeBigInt64(type, output, input, maybeTemp64);
  masm.bind(ool->rejoin());
}
   5505 
// Call the getter of the GetterSetter boxed in |accessorAndOutput| through
// the generic call trampoline; the getter's return value ends up back in
// |accessorAndOutput|. Jumps to |nullGetter| when the GetterSetter has no
// getter function.
void CodeGenerator::emitCallMegamorphicGetter(
    LInstruction* lir, ValueOperand accessorAndOutput, Register obj,
    Register calleeScratch, Register argcScratch, Label* nullGetter) {
  MOZ_ASSERT(calleeScratch == IonGenericCallCalleeReg);
  MOZ_ASSERT(argcScratch == IonGenericCallArgcReg);

  // Unbox the GetterSetter private and load its getter pointer.
  masm.unboxNonDouble(accessorAndOutput, calleeScratch,
                      JSVAL_TYPE_PRIVATE_GCTHING);

  masm.loadPtr(Address(calleeScratch, GetterSetter::offsetOfGetter()),
               calleeScratch);
  masm.branchTestPtr(Assembler::Zero, calleeScratch, calleeScratch, nullGetter);

  // Push |this| (the object), padding first to keep Value alignment.
  if (JitStackValueAlignment > 1) {
    masm.reserveStack(sizeof(Value) * (JitStackValueAlignment - 1));
  }
  masm.pushValue(JSVAL_TYPE_OBJECT, obj);

  masm.checkStackAlignment();

  // Getters are invoked with zero actual arguments.
  masm.move32(Imm32(0), argcScratch);
  ensureOsiSpace();

  TrampolinePtr genericCallStub =
      gen->jitRuntime()->getIonGenericCallStub(IonGenericCallKind::Call);
  uint32_t callOffset = masm.callJit(genericCallStub);
  markSafepointAt(callOffset, lir);

  // The callee may have run in another realm; switch back to ours.
  masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);

  masm.moveValue(JSReturnOperand, accessorAndOutput);

  masm.setFramePushed(frameSize());
  emitRestoreStackPointerFromFP();
}
   5541 
   5542 void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
   5543  Register64 input = ToRegister64(lir->input());
   5544  Register64 temp = ToRegister64(lir->temp0());
   5545  Register output = ToRegister(lir->output());
   5546 
   5547  emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp.scratchReg(),
   5548                   temp);
   5549 }
   5550 
   5551 void CodeGenerator::visitUint64ToBigInt(LUint64ToBigInt* lir) {
   5552  Register64 input = ToRegister64(lir->input());
   5553  Register temp = ToRegister(lir->temp0());
   5554  Register output = ToRegister(lir->output());
   5555 
   5556  emitCreateBigInt(lir, Scalar::BigUint64, input, output, temp);
   5557 }
   5558 
// Narrow an int64 to an intptr-sized value, bailing out when it doesn't fit
// in the pointer range (signed or unsigned depending on the MIR node).
void CodeGenerator::visitInt64ToIntPtr(LInt64ToIntPtr* lir) {
  Register64 input = ToRegister64(lir->input());
#ifdef JS_64BIT
  // On 64-bit platforms the input aliases the output, so only the range
  // check is needed.
  MOZ_ASSERT(input.reg == ToRegister(lir->output()));
#else
  Register output = ToRegister(lir->output());
#endif

  Label bail;
  if (lir->mir()->isSigned()) {
    masm.branchInt64NotInPtrRange(input, &bail);
  } else {
    masm.branchUInt64NotInPtrRange(input, &bail);
  }
  bailoutFrom(&bail, lir->snapshot());

#ifndef JS_64BIT
  // On 32-bit platforms keep only the low word.
  masm.move64To32(input, output);
#endif
}
   5579 
// Widen an intptr-sized value to int64 with sign extension. Only emitted on
// 32-bit platforms; crashing here on 64-bit flags an unexpected LIR node.
void CodeGenerator::visitIntPtrToInt64(LIntPtrToInt64* lir) {
#ifdef JS_64BIT
  MOZ_CRASH("Not used on 64-bit platforms");
#else
  Register input = ToRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  masm.move32To64SignExtend(input, output);
#endif
}
   5590 
   5591 Address CodeGenerator::getNurseryValueAddress(ValueOrNurseryValueIndex val,
   5592                                              Register reg) {
   5593  // Move the address of the Value stored in the IonScript into |reg|.
   5594  uint32_t nurseryIndex = val.toNurseryValueIndex();
   5595  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
   5596  masm.propagateOOM(nurseryValueLabels_.emplaceBack(label, nurseryIndex));
   5597  return Address(reg, 0);
   5598 }
   5599 
   5600 void CodeGenerator::visitGuardValue(LGuardValue* lir) {
   5601  ValueOperand input = ToValue(lir->input());
   5602  Register temp = ToTempRegisterOrInvalid(lir->temp0());
   5603  ValueOrNurseryValueIndex expected = lir->mir()->expected();
   5604 
   5605  Label bail;
   5606  if (expected.isValue()) {
   5607    Value expectedVal = expected.toValue();
   5608    if (expectedVal.isNaN()) {
   5609      MOZ_ASSERT(temp != InvalidReg);
   5610      masm.branchTestNaNValue(Assembler::NotEqual, input, temp, &bail);
   5611    } else {
   5612      MOZ_ASSERT(temp == InvalidReg);
   5613      masm.branchTestValue(Assembler::NotEqual, input, expectedVal, &bail);
   5614    }
   5615  } else {
   5616    // Compare to the Value stored in IonScript's nursery values list.
   5617    MOZ_ASSERT(temp != InvalidReg);
   5618    Address valueAddr = getNurseryValueAddress(expected, temp);
   5619    masm.branchTestValue(Assembler::NotEqual, valueAddr, input, &bail);
   5620  }
   5621 
   5622  bailoutFrom(&bail, lir->snapshot());
   5623 }
   5624 
   5625 void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
   5626  ValueOperand input = ToValue(lir->value());
   5627 
   5628  ScratchTagScope tag(masm, input);
   5629  masm.splitTagForTest(input, tag);
   5630 
   5631  Label done;
   5632  masm.branchTestNull(Assembler::Equal, tag, &done);
   5633 
   5634  Label bail;
   5635  masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
   5636  bailoutFrom(&bail, lir->snapshot());
   5637 
   5638  masm.bind(&done);
   5639 }
   5640 
   5641 void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
   5642  ValueOperand input = ToValue(lir->value());
   5643 
   5644  Label bail;
   5645  masm.branchTestObject(Assembler::Equal, input, &bail);
   5646  bailoutFrom(&bail, lir->snapshot());
   5647 }
   5648 
   5649 void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
   5650  Register function = ToRegister(lir->function());
   5651 
   5652  Label bail;
   5653  if (uint16_t flags = lir->mir()->expectedFlags()) {
   5654    masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
   5655  }
   5656  if (uint16_t flags = lir->mir()->unexpectedFlags()) {
   5657    masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
   5658  }
   5659  bailoutFrom(&bail, lir->snapshot());
   5660 }
   5661 
   5662 void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
   5663    LGuardFunctionIsNonBuiltinCtor* lir) {
   5664  Register function = ToRegister(lir->function());
   5665  Register temp = ToRegister(lir->temp0());
   5666 
   5667  Label bail;
   5668  masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
   5669  bailoutFrom(&bail, lir->snapshot());
   5670 }
   5671 
   5672 void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
   5673  Register function = ToRegister(lir->function());
   5674  Register temp = ToRegister(lir->temp0());
   5675 
   5676  Assembler::Condition cond =
   5677      lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
   5678 
   5679  Label bail;
   5680  masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
   5681  bailoutFrom(&bail, lir->snapshot());
   5682 }
   5683 
   5684 void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
   5685  Register function = ToRegister(lir->function());
   5686 
   5687  Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
   5688  bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
   5689                ImmGCPtr(lir->mir()->expected()), lir->snapshot());
   5690 }
   5691 
   5692 // Out-of-line path to update the store buffer.
   5693 class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
   5694  LInstruction* lir_;
   5695  const LAllocation* object_;
   5696 
   5697 public:
   5698  OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
   5699      : lir_(lir), object_(object) {}
   5700 
   5701  void accept(CodeGenerator* codegen) override {
   5702    codegen->visitOutOfLineCallPostWriteBarrier(this);
   5703  }
   5704 
   5705  LInstruction* lir() const { return lir_; }
   5706  const LAllocation* object() const { return object_; }
   5707 };
   5708 
// Fast path for a post-write barrier on a known tenured |cell|: check (and
// set) the cell's bit in its arena's buffered-cells set directly, jumping to
// |exit| when nothing more is needed and to |callVM| when the VM must
// allocate a cell set for the arena.
static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
                                            const gc::TenuredCell* cell,
                                            AllocatableGeneralRegisterSet& regs,
                                            Label* exit, Label* callVM) {
  Register temp = regs.takeAny();

  gc::Arena* arena = cell->arena();

  // Load the arena's buffered-cells set pointer.
  Register cells = temp;
  masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);

  // Compute the word offset and bit mask of this cell's bit in the set;
  // both are compile-time constants for a known cell.
  size_t index = gc::ArenaCellSet::getCellIndex(cell);
  size_t word;
  uint32_t mask;
  gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
  size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);

  // Bit already set: the cell is already buffered, nothing to do.
  masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
                    exit);

  // Check whether this is the sentinel set and if so call the VM to allocate
  // one for this arena.
  masm.branchPtr(Assembler::Equal,
                 Address(cells, gc::ArenaCellSet::offsetOfArena()),
                 ImmPtr(nullptr), callVM);

  // Add the cell to the set.
  masm.or32(Imm32(mask), Address(cells, offset));
  masm.jump(exit);

  regs.add(temp);
}
   5741 
// Emit a post-write barrier for a store into the object in |objreg|.
// |maybeConstant|, when non-null, is the statically known object, enabling a
// direct store-buffer bitmap check; otherwise a one-element cache is
// consulted. Falls back to a VM call in all remaining cases.
static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
                                 Register objreg, JSObject* maybeConstant,
                                 bool isGlobal,
                                 AllocatableGeneralRegisterSet& regs) {
  // A global barrier requires the object to be statically known.
  MOZ_ASSERT_IF(isGlobal, maybeConstant);

  Label callVM;
  Label exit;

  Register temp = regs.takeAny();

  // We already have a fast path to check whether a global is in the store
  // buffer.
  if (!isGlobal) {
    if (maybeConstant) {
      // Check store buffer bitmap directly for known object.
      EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
                                      &exit, &callVM);
    } else {
      // Check one element cache to avoid VM call.
      masm.branchPtr(Assembler::Equal,
                     AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
                     objreg, &exit);
    }
  }

  // Call into the VM to barrier the write.
  masm.bind(&callVM);

  Register runtimereg = temp;
  masm.mov(ImmPtr(runtime), runtimereg);

  masm.setupAlignedABICall();
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  if (isGlobal) {
    using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
    masm.callWithABI<Fn, PostGlobalWriteBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }

  masm.bind(&exit);
}
   5787 
   5788 void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
   5789  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
   5790 
   5791  Register objreg;
   5792  JSObject* object = nullptr;
   5793  bool isGlobal = false;
   5794  if (obj->isConstant()) {
   5795    object = &obj->toConstant()->toObject();
   5796    isGlobal = isGlobalObject(object);
   5797    objreg = regs.takeAny();
   5798    masm.movePtr(ImmGCPtr(object), objreg);
   5799  } else {
   5800    objreg = ToRegister(obj);
   5801    regs.takeUnchecked(objreg);
   5802  }
   5803 
   5804  EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
   5805 }
   5806 
   5807 // Returns true if `def` might be allocated in the nursery.
   5808 static bool ValueNeedsPostBarrier(MDefinition* def) {
   5809  if (def->isBox()) {
   5810    def = def->toBox()->input();
   5811  }
   5812  if (def->type() == MIRType::Value) {
   5813    return true;
   5814  }
   5815  return NeedsPostBarrier(def->type());
   5816 }
   5817 
// Emit a post-write barrier for a store of |val| into an element of |obj| at
// |index| (optionally offset by |indexDiff|). Emits nothing when the value
// statically can't be a nursery cell. The out-of-line path calls the VM's
// PostWriteElementBarrier with all volatile registers preserved.
void CodeGenerator::emitElementPostWriteBarrier(
    MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
    Register index, Register scratch, const ConstantOrRegister& val,
    int32_t indexDiff) {
  if (val.constant()) {
    // Constants can't be nursery pointers, so no barrier is needed.
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  // Typed non-GC values (e.g. numbers) never need a barrier.
  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    masm.PushRegsInMask(liveVolatileRegs);

    // Apply the caller-supplied index offset before the VM call.
    if (indexDiff != 0) {
      masm.add32(Imm32(indexDiff), index);
    }

    masm.setupUnalignedABICall(scratch);
    masm.movePtr(ImmPtr(gen->runtime), scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
    masm.callWithABI<Fn, PostWriteElementBarrier>();

    // We don't need a sub32 here because index must be in liveVolatileRegs
    // if indexDiff is not zero, so it will be restored below.
    MOZ_ASSERT_IF(indexDiff != 0, liveVolatileRegs.has(index));

    masm.PopRegsInMask(liveVolatileRegs);

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, mir);

  // No barrier needed when the object itself is in the nursery.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());

  // Take the OOL path only when the stored value is a nursery cell.
  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
                                  ool->entry());
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
                                 scratch, ool->entry());
  }

  masm.bind(ool->rejoin());
}
   5870 
   5871 void CodeGenerator::emitPostWriteBarrier(Register objreg) {
   5872  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
   5873  regs.takeUnchecked(objreg);
   5874  EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
   5875 }
   5876 
   5877 void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
   5878    OutOfLineCallPostWriteBarrier* ool) {
   5879  saveLiveVolatile(ool->lir());
   5880  const LAllocation* obj = ool->object();
   5881  emitPostWriteBarrier(obj);
   5882  restoreLiveVolatile(ool->lir());
   5883 
   5884  masm.jump(ool->rejoin());
   5885 }
   5886 
   5887 void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
   5888                                                OutOfLineCode* ool) {
   5889  // Check whether an object is a global that we have already barriered before
   5890  // calling into the VM.
   5891  //
   5892  // We only check for the script's global, not other globals within the same
   5893  // compartment, because we bake in a pointer to realm->globalWriteBarriered
   5894  // and doing that would be invalid for other realms because they could be
   5895  // collected before the Ion code is discarded.
   5896 
   5897  if (!maybeGlobal->isConstant()) {
   5898    return;
   5899  }
   5900 
   5901  JSObject* obj = &maybeGlobal->toConstant()->toObject();
   5902  if (gen->realm->maybeGlobal() != obj) {
   5903    return;
   5904  }
   5905 
   5906  const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
   5907  masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
   5908                ool->rejoin());
   5909 }
   5910 
// Shared inline portion of the typed (Object/String/BigInt) post-write
// barriers: skip the barrier when the stored-to object is itself in the
// nursery or the value isn't a nursery cell; otherwise take the OOL path
// which calls into the VM.
template <class LPostBarrierType, MIRType nurseryType>
void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
                                                OutOfLineCode* ool) {
  static_assert(NeedsPostBarrier(nurseryType));

  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // The object must be tenured because MIR and LIR can't contain nursery
    // pointers.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  Register value = ToRegister(lir->value());
  // Sanity-check the LIR value's MIR type against the template argument.
  if constexpr (nurseryType == MIRType::Object) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
  } else if constexpr (nurseryType == MIRType::String) {
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
  } else {
    static_assert(nurseryType == MIRType::BigInt);
    MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
  }
  masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
   5944 
// Shared inline portion of the boxed-Value post-write barriers; mirrors
// visitPostWriteBarrierCommon but tests a full Value rather than a typed
// register.
template <class LPostBarrierType>
void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
                                                 OutOfLineCode* ool) {
  addOutOfLineCode(ool, lir->mir());

  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (lir->object()->isConstant()) {
    // The object must be tenured because MIR and LIR can't contain nursery
    // pointers.
    MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
  } else {
    masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
                                 temp, ool->rejoin());
  }

  maybeEmitGlobalBarrierCheck(lir->object(), ool);

  ValueOperand value = ToValue(lir->value());
  masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());

  masm.bind(ool->rejoin());
}
   5968 
   5969 void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
   5970  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
   5971  visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
   5972 }
   5973 
   5974 void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
   5975  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
   5976  visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
   5977 }
   5978 
   5979 void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
   5980  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
   5981  visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
   5982 }
   5983 
   5984 void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
   5985  auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
   5986  visitPostWriteBarrierCommonV(lir, ool);
   5987 }
   5988 
   5989 // Out-of-line path to update the store buffer.
   5990 class OutOfLineCallPostWriteElementBarrier
   5991    : public OutOfLineCodeBase<CodeGenerator> {
   5992  LInstruction* lir_;
   5993  const LAllocation* object_;
   5994  const LAllocation* index_;
   5995 
   5996 public:
   5997  OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
   5998                                       const LAllocation* object,
   5999                                       const LAllocation* index)
   6000      : lir_(lir), object_(object), index_(index) {}
   6001 
   6002  void accept(CodeGenerator* codegen) override {
   6003    codegen->visitOutOfLineCallPostWriteElementBarrier(this);
   6004  }
   6005 
   6006  LInstruction* lir() const { return lir_; }
   6007 
   6008  const LAllocation* object() const { return object_; }
   6009 
   6010  const LAllocation* index() const { return index_; }
   6011 };
   6012 
// Out-of-line path: call the VM's PostWriteElementBarrier, preserving live
// volatile registers around the ABI call.
void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
    OutOfLineCallPostWriteElementBarrier* ool) {
  saveLiveVolatile(ool->lir());

  const LAllocation* obj = ool->object();
  const LAllocation* index = ool->index();

  Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
  Register indexreg = ToRegister(index);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  regs.takeUnchecked(indexreg);

  if (obj->isConstant()) {
    // A constant object must be materialized into a scratch register for
    // the ABI call.
    objreg = regs.takeAny();
    masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
  } else {
    regs.takeUnchecked(objreg);
  }

  Register runtimereg = regs.takeAny();
  using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
  masm.setupAlignedABICall();
  masm.mov(ImmPtr(gen->runtime), runtimereg);
  masm.passABIArg(runtimereg);
  masm.passABIArg(objreg);
  masm.passABIArg(indexreg);
  masm.callWithABI<Fn, PostWriteElementBarrier>();

  restoreLiveVolatile(ool->lir());

  masm.jump(ool->rejoin());
}
   6046 
   6047 void CodeGenerator::visitPostWriteElementBarrierO(
   6048    LPostWriteElementBarrierO* lir) {
   6049  auto ool = new (alloc())
   6050      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
   6051  visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
   6052                                                                          ool);
   6053 }
   6054 
   6055 void CodeGenerator::visitPostWriteElementBarrierS(
   6056    LPostWriteElementBarrierS* lir) {
   6057  auto ool = new (alloc())
   6058      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
   6059  visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
   6060                                                                          ool);
   6061 }
   6062 
   6063 void CodeGenerator::visitPostWriteElementBarrierBI(
   6064    LPostWriteElementBarrierBI* lir) {
   6065  auto ool = new (alloc())
   6066      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
   6067  visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
   6068                                                                           ool);
   6069 }
   6070 
   6071 void CodeGenerator::visitPostWriteElementBarrierV(
   6072    LPostWriteElementBarrierV* lir) {
   6073  auto ool = new (alloc())
   6074      OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
   6075  visitPostWriteBarrierCommonV(lir, ool);
   6076 }
   6077 
   6078 void CodeGenerator::visitAssertCanElidePostWriteBarrier(
   6079    LAssertCanElidePostWriteBarrier* lir) {
   6080  Register object = ToRegister(lir->object());
   6081  ValueOperand value = ToValue(lir->value());
   6082  Register temp = ToRegister(lir->temp0());
   6083 
   6084  Label ok;
   6085  masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
   6086  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);
   6087 
   6088  masm.assumeUnreachable("Unexpected missing post write barrier");
   6089 
   6090  masm.bind(&ok);
   6091 }
   6092 
// Emit a call to the native function |native| for LCallNative/LCallClassHook:
// build a fake native exit frame, perform the ABI call, branch to the failure
// label if the native returns false, and load the result Value from vp[0]
// into JSReturnOperand. |argUintNReg| must already hold argc; |unusedStack|
// is stack space reclaimed before pushing the vp array.
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native,
                                   Register argContextReg, Register argUintNReg,
                                   Register argVpReg, Register tempReg,
                                   uint32_t unusedStack) {
  masm.checkStackAlignment();

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Allocate space for the outparam, moving the StackPointer to what will be
  // &vp[1].
  masm.adjustStack(unusedStack);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. The StackPointer is moved
  // to &vp[0].
  //
  // Also reserves the space for |NativeExitFrameLayout::{lo,hi}CalleeResult_|.
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    // Class hooks only know the callee at runtime, so push it from a register.
    Register calleeReg = ToRegister(call->getCallee());
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));

    // Enter the callee realm.
    if (call->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleeReg, tempReg);
    }
  } else {
    // LCallNative has a statically known single target.
    WrappedFunction* target = call->mir()->getSingleTarget();
    masm.Push(ObjectValue(*target->rawNativeJSFunction()));

    // Enter the callee realm.
    if (call->mir()->maybeCrossRealm()) {
      masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
      masm.switchToObjectRealm(tempReg, tempReg);
    }
  }

  // Preload arguments into registers.
  masm.loadJSContext(argContextReg);
  masm.moveStackPtrTo(argVpReg);

  // Initialize |NativeExitFrameLayout::argc_|.
  masm.Push(argUintNReg);

  // Construct native exit frame.
  //
  // |buildFakeExitFrame| initializes |NativeExitFrameLayout::exit_| and
  // |enterFakeExitFrameForNative| initializes |NativeExitFrameLayout::footer_|.
  //
  // The NativeExitFrameLayout is now fully initialized.
  uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
  masm.enterFakeExitFrameForNative(argContextReg, tempReg,
                                   call->mir()->isConstructing());

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.passABIArg(argContextReg);
  masm.passABIArg(argUintNReg);
  masm.passABIArg(argVpReg);

  ensureOsiSpace();
  // If we're using a simulator build, `native` will already point to the
  // simulator's call-redirection code for LCallClassHook. Load the address in
  // a register first so that we don't try to redirect it a second time.
  bool emittedCall = false;
#ifdef JS_SIMULATOR
  if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
    masm.movePtr(ImmPtr(native), tempReg);
    masm.callWithABI(tempReg);
    emittedCall = true;
  }
#endif
  if (!emittedCall) {
    masm.callWithABI(DynamicFunction<JSNative>(native), ABIType::General,
                     CheckUnsafeCallWithABI::DontCheckHasExitFrame);
  }

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  // Exit the callee realm.
  if (call->mir()->maybeCrossRealm()) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      JSReturnOperand);

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
      call->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

#ifdef DEBUG
  // Native constructors are guaranteed to return an Object value.
  if (call->mir()->isConstructing()) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.assumeUnreachable("native constructors don't return primitives");
    masm.bind(&notPrimitive);
  }
#endif
}
   6206 
// Convenience overload: gathers the argument registers from the LIR node,
// initializes argc, emits the native call via the 7-argument overload, and
// unwinds the native exit frame afterwards.
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
  uint32_t unusedStack =
      UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argContextReg = ToRegister(call->getArgContextReg());
  const Register argUintNReg = ToRegister(call->getArgUintNReg());
  const Register argVpReg = ToRegister(call->getArgVpReg());

  // Misc. temporary registers.
  const Register tempReg = ToRegister(call->getTempReg());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  // Initialize the argc register.
  masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);

  // Create the exit frame and call the native.
  emitCallNative(call, native, argContextReg, argUintNReg, argVpReg, tempReg,
                 unusedStack);

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
   6237 
// Codegen for a call to a known native (non-JIT-entry) JSFunction.
void CodeGenerator::visitCallNative(LCallNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());

  JSNative native = target->native();
  // If the result is unused and the JitInfo provides a variant of the native
  // that skips computing the return value, call that cheaper variant instead.
  if (call->ignoresReturnValue() && target->hasJitInfo()) {
    const JSJitInfo* jitInfo = target->jitInfo();
    if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
      native = jitInfo->ignoresReturnValueMethod;
    }
  }
  emitCallNative(call, native);
}
   6252 
// Codegen for calling a non-function callable via its class call/construct
// hook; uses the same native-call path as visitCallNative.
void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
  emitCallNative(call, call->mir()->target());
}
   6256 
// Load the DOM object's private value into |priv|, for either a native DOM
// object or a DOM proxy. |obj| holds the DOM object and must be distinct from
// |priv|; |priv| doubles as a scratch register before receiving the result.
static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
                           DOMObjectKind kind) {
  // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
  // will be in the first slot but may be fixed or non-fixed.
  MOZ_ASSERT(obj != priv);

  switch (kind) {
    case DOMObjectKind::Native:
      // If it's a native object, the value must be in a fixed slot.
      // See CanAttachDOMCall in CacheIR.cpp.
      masm.debugAssertObjHasFixedSlots(obj, priv);
      masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
      break;
    case DOMObjectKind::Proxy: {
#ifdef DEBUG
      // Sanity check: it must be a DOM proxy.
      Label isDOMProxy;
      masm.branchTestProxyHandlerFamily(
          Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
      masm.assumeUnreachable("Expected a DOM proxy");
      masm.bind(&isDOMProxy);
#endif
      // For proxies the private lives in reserved slot 0, reached through the
      // out-of-line ProxyReservedSlots pointer.
      masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
      masm.loadPrivate(
          Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
      break;
    }
  }
}
   6286 
// Codegen for calling a DOM method through its JSJitInfo fast-path entry
// point, bypassing the generic JSNative calling convention. Builds an
// IonDOMMethod exit frame, extracts the DOM object's private, and performs
// the ABI call with (cx, HandleObject, private, JSJitMethodCallArgs&).
void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
  WrappedFunction* target = call->getSingleTarget();
  MOZ_ASSERT(target);
  MOZ_ASSERT(target->isNativeWithoutJitEntry());
  MOZ_ASSERT(target->hasJitInfo());
  MOZ_ASSERT(call->mir()->isCallDOMNative());

  int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());

  // Registers used for callWithABI() argument-passing.
  const Register argJSContext = ToRegister(call->getArgJSContext());
  const Register argObj = ToRegister(call->getArgObj());
  const Register argPrivate = ToRegister(call->getArgPrivate());
  const Register argArgs = ToRegister(call->getArgArgs());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // DOM methods have the signature:
  //  bool (*)(JSContext*, HandleObject, void* private, const
  //  JSJitMethodCallArgs& args)
  // Where args is initialized from an argc and a vp, vp[0] is space for an
  // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
  // function arguments.  Note that args stores the argv, not the vp, and
  // argv == vp + 2.

  // Nestle the stack up against the pushed arguments, leaving StackPointer at
  // &vp[1]
  masm.adjustStack(unusedStack);
  // argObj is filled with the extracted object, then returned.
  Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
  MOZ_ASSERT(obj == argObj);

  // Push a Value containing the callee object: natives are allowed to access
  // their callee before setting the return value. After this the StackPointer
  // points to &vp[0].
  masm.Push(ObjectValue(*target->rawNativeJSFunction()));

  // Now compute the argv value.  Since StackPointer is pointing to &vp[0] and
  // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
  // StackPointer.
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
  static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
                IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);

  LoadDOMPrivate(masm, obj, argPrivate,
                 static_cast<MCallDOMNative*>(call->mir())->objectKind());

  // Push argc from the call instruction into what will become the IonExitFrame
  masm.Push(Imm32(call->numActualArgs()));

  // Push our argv onto the stack
  masm.Push(argArgs);
  // And store our JSJitMethodCallArgs* in argArgs.
  masm.moveStackPtrTo(argArgs);

  // Push |this| object for passing HandleObject. We push after argc to
  // maintain the same sp-relative location of the object pointer with other
  // DOMExitFrames.
  masm.Push(argObj);
  masm.moveStackPtrTo(argObj);

  if (call->mir()->maybeCrossRealm()) {
    // We use argJSContext as scratch register here.
    masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
    masm.switchToObjectRealm(argJSContext, argJSContext);
  }

  // If the wrapper allocation is expected to be tenured, publish the tenuring
  // alloc site so allocations inside the call go to the tenured heap.
  bool preTenureWrapperAllocation =
      call->mir()->to<MCallDOMNative>()->initialHeap() == gc::Heap::Tenured;
  if (preTenureWrapperAllocation) {
    auto ptr = ImmPtr(mirGen().realm->zone()->tenuringAllocSite());
    masm.storeLocalAllocSite(ptr, argJSContext);
  }

  // Construct native exit frame.
  uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);

  masm.loadJSContext(argJSContext);
  masm.enterFakeExitFrame(argJSContext, argJSContext,
                          ExitFrameType::IonDOMMethod);

  markSafepointAt(safepointOffset, call);

  // Construct and execute call.
  masm.setupAlignedABICall();
  masm.loadJSContext(argJSContext);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argObj);
  masm.passABIArg(argPrivate);
  masm.passABIArg(argArgs);
  // Reserve space so the OSI point after the call has a distinct offset.
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (target->jitInfo()->isInfallible) {
    // Infallible methods can't fail, so load the result unconditionally.
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Test for failure.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    // Load the outparam vp[0] into output register(s).
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMMethodExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  static_assert(!JSReturnOperand.aliases(ReturnReg),
                "Clobbering ReturnReg should not affect the return value");

  // Switch back to the current realm if needed. Note: if the DOM method threw
  // an exception, the exception handler will do this.
  if (call->mir()->maybeCrossRealm()) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Wipe out the preTenuring bit from the local alloc site
  // On exception we handle this in C++
  if (preTenureWrapperAllocation) {
    masm.storeLocalAllocSite(ImmPtr(nullptr), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  // The next instruction is removing the footer of the exit frame, so there
  // is no need for leaveFakeExitFrame.

  // Move the StackPointer back to its original location, unwinding the native
  // exit frame.
  masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
  MOZ_ASSERT(masm.framePushed() == initialStack);
}
   6429 
// Codegen for loading a self-hosting intrinsic value by name via a VM call.
void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
  pushArg(ImmGCPtr(lir->mir()->name()));

  using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
  callVM<Fn, GetIntrinsicValue>(lir);
}
   6436 
// Emit a slow-path call through the jit::InvokeFunction VM function.
// |unusedStack| is the gap between the stack pointer and the pushed argument
// vector; it is freed before the call so argv can be taken from the stack
// pointer, and re-reserved afterwards to restore the frame depth.
// Note: pushArg pushes in reverse order of the VM function's signature.
void CodeGenerator::emitCallInvokeFunction(
    LInstruction* call, Register calleereg, bool constructing,
    bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
  // Nestle %esp up to the argument vector.
  // Each path must account for framePushed_ separately, for callVM to be valid.
  masm.freeStack(unusedStack);

  pushArg(masm.getStackPointer());  // argv.
  pushArg(Imm32(argc));             // argc.
  pushArg(Imm32(ignoresReturnValue));
  pushArg(Imm32(constructing));  // constructing.
  pushArg(calleereg);            // JSFunction*.

  using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
                      MutableHandleValue);
  callVM<Fn, jit::InvokeFunction>(call);

  // Un-nestle %esp from the argument vector. No prefix was pushed.
  masm.reserveStack(unusedStack);
}
   6457 
// Codegen for a call whose target is not known at compile time. Delegates
// the actual dispatch (jit entry, native, fun_call, bound function, or VM
// fallback) to the shared Ion generic-call trampoline; this code is tightly
// coupled with generateIonGenericCallStub below.
void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
  // The callee is passed straight through to the trampoline.
  MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);

  Register argcReg = ToRegister(call->getArgc());
  uint32_t unusedStack =
      UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());

  // Known-target case is handled by LCallKnown.
  MOZ_ASSERT(!call->hasSingleTarget());

  masm.checkStackAlignment();

  masm.move32(Imm32(call->numActualArgs()), argcReg);

  // Nestle the StackPointer up to the argument vector.
  masm.freeStack(unusedStack);
  ensureOsiSpace();

  auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
                                            : IonGenericCallKind::Call;

  TrampolinePtr genericCallStub =
      gen->jitRuntime()->getIonGenericCallStub(kind);
  uint32_t callOffset = masm.callJit(genericCallStub);
  markSafepointAt(callOffset, call);

  if (call->mir()->maybeCrossRealm()) {
    // The trampoline entered the callee's realm; switch back to ours.
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // If the return value of the constructing function is Primitive,
  // replace the return value with the Object from CreateThis.
  if (call->mir()->isConstructing()) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    size_t thisvOffset =
        JitFrameLayout::offsetOfThis() - JitFrameLayout::bytesPoppedAfterCall();
    masm.loadValue(Address(masm.getStackPointer(), thisvOffset),
                   JSReturnOperand);
#ifdef DEBUG
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.assumeUnreachable("CreateThis creates an object");
#endif
    masm.bind(&notPrimitive);
  }

  // Restore stack pointer.
  masm.setFramePushed(frameSize());
  emitRestoreStackPointerFromFP();
}
   6513 
// Emit a loop that shifts the |argc| Values on the stack down by one slot
// (8 bytes), overwriting the bottom value. Used by the fun_call fast path to
// drop the callee and promote |this|/args. |curr|, |end|, and |scratch| are
// clobbered; jumps to |done| when the copy is complete.
void JitRuntime::generateIonGenericCallArgumentsShift(
    MacroAssembler& masm, Register argc, Register curr, Register end,
    Register scratch, Label* done) {
  static_assert(sizeof(Value) == 8);
  // There are |argc| Values on the stack. Shift them all down by 8 bytes,
  // overwriting the first value.

  // Initialize `curr` to the destination of the first copy, and `end` to the
  // final value of curr.
  masm.moveStackPtrTo(curr);
  masm.computeEffectiveAddress(BaseValueIndex(curr, argc), end);

  // NOTE(review): the copy advances by sizeof(uintptr_t) per iteration, so on
  // 32-bit targets each 8-byte Value is moved in two pointer-sized steps.
  Label loop;
  masm.bind(&loop);
  masm.branchPtr(Assembler::Equal, curr, end, done);
  masm.loadPtr(Address(curr, 8), scratch);
  masm.storePtr(scratch, Address(curr, 0));
  masm.addPtr(Imm32(sizeof(uintptr_t)), curr);
  masm.jump(&loop);
}
   6534 
// Generate the shared trampoline used by visitCallGeneric for calls with an
// unknown target. Dispatches on the callee kind: JS functions with a jit
// entry are tail-called; natives, fun_call, and bound functions get fast
// paths; everything else falls back to an InvokeFunction VM call.
void JitRuntime::generateIonGenericCallStub(MacroAssembler& masm,
                                            IonGenericCallKind kind) {
  AutoCreatedBy acb(masm, "JitRuntime::generateIonGenericCallStub");
  ionGenericCallStubOffset_[kind] = startTrampolineCode(masm);

  // This code is tightly coupled with visitCallGeneric.
  //
  // Upon entry:
  //   IonGenericCallCalleeReg contains a pointer to the callee object.
  //   IonGenericCallArgcReg contains the number of actual args.
  //   The arguments have been pushed onto the stack:
  //     [newTarget] (iff isConstructing)
  //     [argN]
  //     ...
  //     [arg1]
  //     [arg0]
  //     [this]
  //     <return address> (if not JS_USE_LINK_REGISTER)
  //
  // This trampoline is responsible for entering the callee's realm,
  // massaging the stack into the right shape, and then performing a
  // tail call. We will return directly to the Ion code from the
  // callee.
  //
  // To do a tail call, we keep the return address in a register, even
  // on platforms that don't normally use a link register, and push it
  // just before jumping to the callee, after we are done setting up
  // the stack.
  //
  // The caller is responsible for switching back to the caller's
  // realm and cleaning up the stack.

  Register calleeReg = IonGenericCallCalleeReg;
  Register argcReg = IonGenericCallArgcReg;
  AllocatableGeneralRegisterSet regs(IonGenericCallScratchRegs());
  Register scratch = regs.takeAny();
  Register scratch2 = regs.takeAny();

#ifndef JS_USE_LINK_REGISTER
  // Pop the return address into a register so we can tail call later.
  Register returnAddrReg = IonGenericCallReturnAddrReg;
  masm.pop(returnAddrReg);
#endif

#ifdef JS_CODEGEN_ARM
  // The default second scratch register on arm is lr, which we need
  // preserved for tail calls.
  AutoNonDefaultSecondScratchRegister andssr(masm, IonGenericSecondScratchReg);
#endif

  bool isConstructing = kind == IonGenericCallKind::Construct;

  // |entry| is also the re-entry point for the fun_call and bound-function
  // paths after they rewrite the callee and arguments.
  Label entry, notFunction, noJitEntry, vmCall;
  masm.bind(&entry);

  // Guard that the callee is actually a function.
  masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
                               calleeReg, &notFunction);

  // Guard that the callee supports the [[Call]] or [[Construct]] operation.
  // If these tests fail, we will call into the VM to throw an exception.
  if (isConstructing) {
    masm.branchTestFunctionFlags(calleeReg, FunctionFlags::CONSTRUCTOR,
                                 Assembler::Zero, &vmCall);
  } else {
    masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
                            calleeReg, scratch, &vmCall);
  }

  if (isConstructing) {
    // Use the slow path if CreateThis was unable to create the |this| object.
    Address thisAddr(masm.getStackPointer(), 0);
    masm.branchTestNull(Assembler::Equal, thisAddr, &vmCall);
  }

  masm.switchToObjectRealm(calleeReg, scratch);

  // Load jitCodeRaw for callee if it exists.
  masm.branchIfFunctionHasNoJitEntry(calleeReg, &noJitEntry);

  // ****************************
  // * Functions with jit entry *
  // ****************************

  // Pad missing actual args with |undefined| before jumping in.
  generateIonGenericHandleUnderflow(masm, isConstructing, &vmCall);

  masm.loadJitCodeRaw(calleeReg, scratch2);

  // Construct the JitFrameLayout.
  masm.PushCalleeToken(calleeReg, isConstructing);
  masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
#ifndef JS_USE_LINK_REGISTER
  masm.push(returnAddrReg);
#endif

  // Tail call the jit entry.
  masm.jump(scratch2);

  // ********************
  // * Native functions *
  // ********************
  masm.bind(&noJitEntry);
  if (!isConstructing) {
    generateIonGenericCallFunCall(masm, &entry, &vmCall);
  }
  generateIonGenericCallNativeFunction(masm, isConstructing);

  // *******************
  // * Bound functions *
  // *******************
  // TODO: support class hooks?
  masm.bind(&notFunction);
  if (!isConstructing) {
    // TODO: support generic bound constructors?
    generateIonGenericCallBoundFunction(masm, &entry, &vmCall);
  }

  // ********************
  // * Fallback VM call *
  // ********************
  masm.bind(&vmCall);

  // Push InvokeFunction's arguments in reverse signature order, then tail
  // call the pre-generated VM wrapper for it.
  masm.push(masm.getStackPointer());  // argv
  masm.push(argcReg);                 // argc
  masm.push(Imm32(false));            // ignores return value
  masm.push(Imm32(isConstructing));   // constructing
  masm.push(calleeReg);               // callee

  using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
                      MutableHandleValue);
  VMFunctionId id = VMFunctionToId<Fn, jit::InvokeFunction>::id;
  uint32_t invokeFunctionOffset = functionWrapperOffsets_[size_t(id)];
  Label invokeFunctionVMEntry;
  bindLabelToOffset(&invokeFunctionVMEntry, invokeFunctionOffset);

  masm.push(FrameDescriptor(FrameType::IonJS));
#ifndef JS_USE_LINK_REGISTER
  masm.push(returnAddrReg);
#endif
  masm.jump(&invokeFunctionVMEntry);
}
   6675 
// Emit the argument-underflow path for the generic call trampoline: when
// argc < fun->nargs, move |this| and the actual args down the stack and fill
// the vacated slots with |undefined| (plus one padding slot when the number
// of missing args is odd, to keep the stack Value-aligned). Jumps to |vmCall|
// if the required adjustment would exceed JIT_ARGS_LENGTH_MAX.
void JitRuntime::generateIonGenericHandleUnderflow(MacroAssembler& masm,
                                                   bool isConstructing,
                                                   Label* vmCall) {
  Register calleeReg = IonGenericCallCalleeReg;
  Register argcReg = IonGenericCallArgcReg;
  AllocatableGeneralRegisterSet regs(IonGenericCallScratchRegs());
  Register numMissing = regs.takeAny();
  Register src = regs.takeAny();
  Register dest = regs.takeAny();

  // On x86 we have fewer registers than we'd like, so we generate
  // slightly less efficient code.
  Register srcEnd, scratch;
  bool mustSpill = false;
  if (regs.empty()) {
    // Reuse numMissing/calleeReg; their values are spilled and restored
    // around the copy loop below.
    srcEnd = numMissing;
    scratch = calleeReg;
    mustSpill = true;
  } else {
    srcEnd = regs.takeAny();
    scratch = regs.takeAny();
  }

  // Compute fun->nargs - argc. If it's positive, it's the number of
  // undefined args we must push.
  Label noUnderflow;
  masm.loadFunctionArgCount(calleeReg, numMissing);
  masm.sub32(argcReg, numMissing);
  masm.branch32(Assembler::LessThanOrEqual, numMissing, Imm32(0), &noUnderflow);

  // Ensure that we don't adjust the stack pointer by more than a page.
  masm.branch32(Assembler::Above, numMissing, Imm32(JIT_ARGS_LENGTH_MAX),
                vmCall);

  // If numMissing is even, we want to make the following transformation:
  //
  //  INITIAL                               FINAL
  //     [newTarget] (iff isConstructing)   [newTarget] (iff isConstructing)
  //     [argN]                             [undefined]
  //     ...                                [undefined] (...)
  //     [arg1]                             [argN]
  //     [arg0]                             ...
  //     [this] <- sp aligned               [arg1]
  //                                        [arg0]
  //                                        [this] -> moved down numMissing
  //                                                   slots
  //
  // If numMissing is odd, we must also insert padding:
  //     [newTarget] (iff isConstructing)   (padding)
  //     [argN]                             [newTarget] (iff isConstructing)
  //     ...                                [undefined]
  //     [arg1]                             [argN]
  //     [arg0]                             ...
  //     [this] <- sp aligned               [arg1]
  //                                        [arg0]
  //                                        [this] -> moved down numMissing+1
  //                                                   slots
  //
  // Note that |newTarget|, if it exists, must be between the padding and the
  // undefined args. It does not move down along with the actual args.

  // The first step is to copy the memory from [this] through [argN] into the
  // correct position. The source of the copy is the current stack pointer.
  masm.moveStackPtrTo(src);

  // Compute how far the args must be moved and adjust the stack pointer.
  // If numMissing is even, this is numMissing slots. If numMissing is odd,
  // this is numMissing+1 slots. We can compute this as (numMissing + 1) & ~1.
  masm.add32(Imm32(1), numMissing, dest);
  masm.and32(Imm32(~1), dest);
  // Scale slots to bytes (x8 == sizeof(Value)).
  masm.lshift32(Imm32(3), dest);
  masm.subFromStackPtr(dest);
  masm.moveStackPtrTo(dest);

  // We also set up a register pointing to the last copied argument. On x86
  // we don't have enough registers, so we spill the calleeReg and numMissing.
  if (mustSpill) {
    masm.push(calleeReg);
    masm.push(numMissing);
  }
  masm.computeEffectiveAddress(BaseValueIndex(src, argcReg), srcEnd);

  // The stack currently looks like this:
  //
  //   [newTarget]
  //   [argN] <-- srcEnd
  //   ...
  //   [arg0]
  //   [this] <-- src
  //   ...
  //   ...    <-- dest
  //   [spill?]
  //   [spill?]

  // Loop to move the arguments.
  Label argLoop;
  masm.bind(&argLoop);
  masm.copy64(Address(src, 0), Address(dest, 0), scratch);
  masm.addPtr(Imm32(sizeof(Value)), src);
  masm.addPtr(Imm32(sizeof(Value)), dest);
  // BelowOrEqual so [argN] itself (at srcEnd) is also copied.
  masm.branchPtr(Assembler::BelowOrEqual, src, srcEnd, &argLoop);

  if (mustSpill) {
    // We must restore numMissing now, so that we can test if it's odd.
    // The copy64 below still needs calleeReg as a scratch register.
    masm.pop(numMissing);
  }

  if (isConstructing) {
    // If numMissing is odd, we must move newTarget down by one slot.
    Label skip;
    masm.branchTest32(Assembler::Zero, numMissing, Imm32(1), &skip);
    Address newTargetSrc(src, 0);
    Address newTargetDest(src, -int32_t(sizeof(Value)));
    masm.copy64(newTargetSrc, newTargetDest, scratch);
    masm.bind(&skip);
  }

  if (mustSpill) {
    masm.pop(calleeReg);
  }

  // Loop to fill the remaining numMissing slots with UndefinedValue.
  // We do this last so that we can safely clobber numMissing.
  Label undefLoop;
  masm.bind(&undefLoop);
  BaseValueIndex undefSlot(dest, numMissing, -int32_t(sizeof(Value)));
  masm.storeValue(UndefinedValue(), undefSlot);
  masm.branchSub32(Assembler::NonZero, Imm32(1), numMissing, &undefLoop);

  masm.bind(&noUnderflow);
}
   6808 
// Emit the native-function path of the generic call trampoline: push the
// callee as argv[0], build a native exit frame, and call the native through
// the ABI. Unlike the jit-entry path, this does a normal call and returns
// with masm.ret() rather than tail-calling.
void JitRuntime::generateIonGenericCallNativeFunction(MacroAssembler& masm,
                                                      bool isConstructing) {
  Register calleeReg = IonGenericCallCalleeReg;
  Register argcReg = IonGenericCallArgcReg;
  AllocatableGeneralRegisterSet regs(IonGenericCallScratchRegs());
  Register scratch = regs.takeAny();
  Register scratch2 = regs.takeAny();
  Register contextReg = regs.takeAny();
#ifndef JS_USE_LINK_REGISTER
  Register returnAddrReg = IonGenericCallReturnAddrReg;
#endif

  // Push a value containing the callee, which will become argv[0].
  masm.pushValue(JSVAL_TYPE_OBJECT, calleeReg);

  // Load the callee address into calleeReg.
#ifdef JS_SIMULATOR
  // Under the simulator all natives go through a redirection thunk.
  masm.movePtr(ImmPtr(RedirectedCallAnyNative()), calleeReg);
#else
  masm.loadPrivate(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
                   calleeReg);
#endif

  // Load argv into scratch2.
  masm.moveStackPtrTo(scratch2);

  // Push argc.
  masm.push(argcReg);

  masm.loadJSContext(contextReg);

  // Construct native exit frame. Note that unlike other cases in this
  // trampoline, this code does not use a tail call.
  masm.push(FrameDescriptor(FrameType::IonJS));
#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#else
  masm.push(returnAddrReg);
#endif

  masm.push(FramePointer);
  masm.moveStackPtrTo(FramePointer);
  masm.enterFakeExitFrameForNative(contextReg, scratch, isConstructing);

  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(contextReg);  // cx
  masm.passABIArg(argcReg);     // argc
  masm.passABIArg(scratch2);    // argv

  masm.callWithABI(calleeReg);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Load the result from the exit frame's vp[0] outparam slot.
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      JSReturnOperand);

  // Leave the exit frame.
  masm.moveToStackPtr(FramePointer);
  masm.pop(FramePointer);

  // Return.
  masm.ret();
}
   6874 
// Emit the Function.prototype.call fast path for the generic call trampoline.
// If the callee's native is js::fun_call, rewrite the call in place (|this|
// becomes the callee, args shift down one slot, argc decrements) and jump
// back to |entry| to dispatch the inner call; otherwise fall through past
// |notFunCall|. Bails to |vmCall| if |this| is not an object.
void JitRuntime::generateIonGenericCallFunCall(MacroAssembler& masm,
                                               Label* entry, Label* vmCall) {
  Register calleeReg = IonGenericCallCalleeReg;
  Register argcReg = IonGenericCallArgcReg;
  AllocatableGeneralRegisterSet regs(IonGenericCallScratchRegs());
  Register scratch = regs.takeAny();
  Register scratch2 = regs.takeAny();
  Register scratch3 = regs.takeAny();

  Label notFunCall;
  masm.branchPtr(Assembler::NotEqual,
                 Address(calleeReg, JSFunction::offsetOfNativeOrEnv()),
                 ImmPtr(js::fun_call), &notFunCall);

  // In general, we can implement fun_call by replacing calleeReg with
  // |this|, sliding all the other arguments down, and decrementing argc.
  //
  // *BEFORE*                           *AFTER*
  //  [argN]  argc = N+1                 <padding>
  //  ...                                [argN]  argc = N
  //  [arg1]                             ...
  //  [arg0]                             [arg1] <- now arg0
  //  [this] <- top of stack (aligned)   [arg0] <- now this
  //
  // The only exception is when argc is already 0, in which case instead
  // of shifting arguments down we replace [this] with UndefinedValue():
  //
  // *BEFORE*                           *AFTER*
  // [this] argc = 0                     [undef] argc = 0
  //
  // After making this transformation, we can jump back to the beginning
  // of this trampoline to handle the inner call.

  // Guard that |this| is an object. If it is, replace calleeReg.
  masm.fallibleUnboxObject(Address(masm.getStackPointer(), 0), scratch, vmCall);
  masm.movePtr(scratch, calleeReg);

  Label hasArgs;
  masm.branch32(Assembler::NotEqual, argcReg, Imm32(0), &hasArgs);

  // No arguments. Replace |this| with |undefined| and start from the top.
  masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), 0));
  masm.jump(entry);

  masm.bind(&hasArgs);

  Label doneSliding;
  generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
                                       scratch3, &doneSliding);
  masm.bind(&doneSliding);
  masm.sub32(Imm32(1), argcReg);

  masm.jump(entry);

  masm.bind(&notFunCall);
}
   6931 
// Generic-call trampoline path for BoundFunctionObject callees.
//
// On entry, calleeReg holds the callee object and argcReg the number of
// actual arguments already on the stack (jit-call layout, |this| on top).
// This code pushes the bound arguments and the bound |this|, loads the
// bound target into calleeReg, updates argcReg, and jumps back to |entry|
// so the trampoline re-dispatches on the unwrapped target. It bails to
// |vmCall| if the callee is not a bound function or if the combined
// argument count would exceed the JIT limit.
void JitRuntime::generateIonGenericCallBoundFunction(MacroAssembler& masm,
                                                     Label* entry,
                                                     Label* vmCall) {
  Register calleeReg = IonGenericCallCalleeReg;
  Register argcReg = IonGenericCallArgcReg;
  AllocatableGeneralRegisterSet regs(IonGenericCallScratchRegs());
  Register scratch = regs.takeAny();
  Register scratch2 = regs.takeAny();
  Register scratch3 = regs.takeAny();

  // Anything that is not a BoundFunctionObject is handled by the VM.
  masm.branchTestObjClass(Assembler::NotEqual, calleeReg,
                          &BoundFunctionObject::class_, scratch, calleeReg,
                          vmCall);

  // Slot addresses inside the bound function object.
  Address targetSlot(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
  Address flagsSlot(calleeReg, BoundFunctionObject::offsetOfFlagsSlot());
  Address thisSlot(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
  Address firstInlineArgSlot(
      calleeReg, BoundFunctionObject::offsetOfFirstInlineBoundArg());

  // Check that we won't be pushing too many arguments: the bound-arg count
  // is stored in the upper bits of the flags slot, above NumBoundArgsShift.
  masm.load32(flagsSlot, scratch);
  masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);
  masm.add32(argcReg, scratch);
  masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX), vmCall);

  // The stack is currently correctly aligned for a jit call. We will
  // be updating the `this` value and potentially adding additional
  // arguments. On platforms with 16-byte alignment, if the number of
  // bound arguments is odd, we have to move the arguments that are
  // currently on the stack. For example, with one bound argument:
  //
  // *BEFORE*                           *AFTER*
  //  [argN]                             <padding>
  //  ...                                [argN]   |
  //  [arg1]                             ...      |  These arguments have been
  //  [arg0]                             [arg1]   |  shifted down 8 bytes.
  //  [this] <- top of stack (aligned)   [arg0]   v
  //                                     [bound0]    <- one bound argument (odd)
  //                                     [boundThis] <- top of stack (aligned)
  //
  Label poppedThis;
  if (JitStackValueAlignment > 1) {
    Label alreadyAligned;
    // Testing the lowest bit of the bound-arg count checks its parity.
    masm.branchTest32(Assembler::Zero, flagsSlot,
                      Imm32(1 << BoundFunctionObject::NumBoundArgsShift),
                      &alreadyAligned);

    // We have an odd number of bound arguments. Shift the existing arguments
    // down by 8 bytes.
    generateIonGenericCallArgumentsShift(masm, argcReg, scratch, scratch2,
                                         scratch3, &poppedThis);
    masm.bind(&alreadyAligned);
  }

  // Pop the current `this`. It will be replaced with the bound `this`.
  // (The shift path above already consumed the old |this| slot and jumps
  // directly to poppedThis.)
  masm.freeStack(sizeof(Value));
  masm.bind(&poppedThis);

  // Load the number of bound arguments in scratch.
  masm.load32(flagsSlot, scratch);
  masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), scratch);

  Label donePushingBoundArguments;
  masm.branch32(Assembler::Equal, scratch, Imm32(0),
                &donePushingBoundArguments);

  // Update argc to include bound arguments.
  masm.add32(scratch, argcReg);

  // Load &boundArgs[0] in scratch2. Few bound args are stored inline in the
  // object; more than MaxInlineBoundArgs are stored out-of-line in an array
  // object whose elements we dereference below.
  Label outOfLineBoundArguments, haveBoundArguments;
  masm.branch32(Assembler::Above, scratch,
                Imm32(BoundFunctionObject::MaxInlineBoundArgs),
                &outOfLineBoundArguments);
  masm.computeEffectiveAddress(firstInlineArgSlot, scratch2);
  masm.jump(&haveBoundArguments);

  masm.bind(&outOfLineBoundArguments);
  masm.unboxObject(firstInlineArgSlot, scratch2);
  masm.loadPtr(Address(scratch2, NativeObject::offsetOfElements()), scratch2);

  masm.bind(&haveBoundArguments);

  // Load &boundArgs[numBoundArgs] in scratch.
  BaseObjectElementIndex lastBoundArg(scratch2, scratch);
  masm.computeEffectiveAddress(lastBoundArg, scratch);

  // Push the bound arguments, starting with the last one.
  // Copying pre-decrements scratch until scratch2 is reached.
  Label boundArgumentsLoop;
  masm.bind(&boundArgumentsLoop);
  masm.subPtr(Imm32(sizeof(Value)), scratch);
  masm.pushValue(Address(scratch, 0));
  masm.branchPtr(Assembler::Above, scratch, scratch2, &boundArgumentsLoop);
  masm.bind(&donePushingBoundArguments);

  // Push the bound `this`.
  masm.pushValue(thisSlot);

  // Load the target in calleeReg.
  masm.unboxObject(targetSlot, calleeReg);

  // At this point, all preconditions for entering the trampoline are met:
  // - calleeReg contains a pointer to the callee object
  // - argcReg contains the number of actual args (now including bound args)
  // - the arguments are on the stack with the correct alignment.
  // Instead of generating more code, we can jump back to the entry point
  // of the trampoline to call the bound target.
  masm.jump(entry);
}
   7043 
// Emits a call to a known (single-target) JS function with a JIT entry.
// The arguments (and |this| / newTarget for constructing calls) have
// already been pushed by the LIR; this code builds the JitFrameLayout,
// performs the call, and restores the stack afterwards.
void CodeGenerator::visitCallKnown(LCallKnown* call) {
  Register calleereg = ToRegister(call->getFunction());
  Register objreg = ToRegister(call->getTempObject());
  uint32_t unusedStack =
      UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
  WrappedFunction* target = call->getSingleTarget();

  // Native single targets (except Wasm and TrampolineNative functions) are
  // handled by LCallNative.
  MOZ_ASSERT(target->hasJitEntry());

  // Missing arguments must have been explicitly appended by WarpBuilder.
  DebugOnly<unsigned> numNonArgsOnStack = 1 + call->isConstructing();
  MOZ_ASSERT(target->nargs() <=
             call->mir()->numStackArgs() - numNonArgsOnStack);

  MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor());

  masm.checkStackAlignment();

  // Calling a class constructor without |new| must throw; route through the
  // VM's InvokeFunction, which performs that check.
  if (target->isClassConstructor() && !call->isConstructing()) {
    emitCallInvokeFunction(call, calleereg, call->isConstructing(),
                           call->ignoresReturnValue(), call->numActualArgs(),
                           unusedStack);
    return;
  }

  MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing());

  MOZ_ASSERT(!call->mir()->needsThisCheck());

  // Switch to the callee's realm before the call if it may differ from ours.
  if (call->mir()->maybeCrossRealm()) {
    masm.switchToObjectRealm(calleereg, objreg);
  }

  masm.loadJitCodeRaw(calleereg, objreg);

  // Nestle the StackPointer up to the argument vector.
  masm.freeStack(unusedStack);

  // Construct the JitFrameLayout.
  masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
  masm.Push(FrameDescriptor(FrameType::IonJS, call->numActualArgs()));

  // Finally call the function in objreg. ensureOsiSpace() guarantees room
  // for the OSI point recorded by the safepoint below.
  ensureOsiSpace();
  uint32_t callOffset = masm.callJit(objreg);
  markSafepointAt(callOffset, call);

  // Switch back to our own realm after a potentially cross-realm call.
  if (call->mir()->maybeCrossRealm()) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Restore stack pointer: pop JitFrameLayout fields still left on the stack
  // and undo the earlier |freeStack(unusedStack)|.
  int prefixGarbage =
      sizeof(JitFrameLayout) - JitFrameLayout::bytesPoppedAfterCall();
  masm.adjustStack(prefixGarbage - unusedStack);

  // If the return value of the constructing function is Primitive,
  // replace the return value with the Object from CreateThis.
  if (call->mir()->isConstructing()) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.loadValue(Address(masm.getStackPointer(), unusedStack),
                   JSReturnOperand);
#ifdef DEBUG
    // In debug builds, verify the replacement value really is an object.
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.assumeUnreachable("CreateThis creates an object");
#endif
    masm.bind(&notPrimitive);
  }
}
   7121 
// Emits a VM call to jit::InvokeFunction for an apply/construct-style LIR
// node |apply|. Arguments are pushed in reverse of the C++ signature order
// expected by the callVM mechanism; argv is the current stack pointer, i.e.
// the arguments already laid out on the stack.
template <typename T>
void CodeGenerator::emitCallInvokeFunction(T* apply) {
  pushArg(masm.getStackPointer());                     // argv.
  pushArg(ToRegister(apply->getArgc()));               // argc.
  pushArg(Imm32(apply->mir()->ignoresReturnValue()));  // ignoresReturnValue.
  pushArg(Imm32(apply->mir()->isConstructing()));      // isConstructing.
  pushArg(ToRegister(apply->getFunction()));           // JSFunction*.

  using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
                      MutableHandleValue);
  callVM<Fn, jit::InvokeFunction>(apply);
}
   7134 
// Do not bailout after the execution of this function since the stack no longer
// corresponds to what is expected by the snapshots.
//
// Reserves stack space for max(argc, callee->nargs()) argument slots,
// rounded up so that the eventual JitFrameLayout is JitStackAlignment-
// aligned, and fills any underflow slots (argc..nargs-1) with |undefined|
// via an out-of-line path. |scratch| is clobbered; |argcreg| is preserved.
template <typename T>
void CodeGenerator::emitAllocateSpaceForApply(T* apply, Register calleeReg,
                                              Register argcreg,
                                              Register scratch) {
  Label* oolRejoin = nullptr;
  // Underflow (argc < nargs) is impossible when the single target is known
  // to take zero formal arguments.
  bool canUnderflow =
      !apply->hasSingleTarget() || apply->getSingleTarget()->nargs() > 0;

  if (canUnderflow) {
    auto* ool =
        new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
          // Align the JitFrameLayout on the JitStackAlignment by allocating
          // callee->nargs() slots, possibly rounded up to the nearest odd
          // number (see below). Leave callee->nargs() in `scratch` for the
          // undef loop.
          if (apply->hasSingleTarget()) {
            uint32_t nargs = apply->getSingleTarget()->nargs();
            // nargs | 1 rounds up to the nearest odd slot count; together
            // with the |this| value pushed later this keeps the total even.
            uint32_t numSlots = JitStackValueAlignment == 1 ? nargs : nargs | 1;
            masm.subFromStackPtr(Imm32((numSlots) * sizeof(Value)));
            masm.move32(Imm32(nargs), scratch);
          } else {
            // `scratch` contains callee->nargs()
            if (JitStackValueAlignment > 1) {
              masm.orPtr(Imm32(1), scratch);
            }
            masm.lshiftPtr(Imm32(ValueShift), scratch);
            masm.subFromStackPtr(scratch);

            // We need callee->nargs in `scratch`. If we rounded it up
            // above, we need to reload it. If we only shifted it, we can
            // simply shift it back.
            if (JitStackValueAlignment > 1) {
              masm.loadFunctionArgCount(calleeReg, scratch);
            } else {
              masm.rshiftPtr(Imm32(ValueShift), scratch);
            }
          }

          // Count from callee->nargs() down to argc, storing undefined values.
          Label loop;
          masm.bind(&loop);
          masm.sub32(Imm32(1), scratch);
          masm.storeValue(UndefinedValue(),
                          BaseValueIndex(masm.getStackPointer(), scratch));
          masm.branch32(Assembler::Above, scratch, argcreg, &loop);
          masm.jump(ool.rejoin());
        });
    addOutOfLineCode(ool, apply->mir());
    oolRejoin = ool->rejoin();

    // Take the out-of-line path only when argc < nargs AND the callee has a
    // JIT entry (otherwise the trampoline/VM handles underflow itself).
    Label noUnderflow;
    if (apply->hasSingleTarget()) {
      masm.branch32(Assembler::AboveOrEqual, argcreg,
                    Imm32(apply->getSingleTarget()->nargs()), &noUnderflow);
    } else {
      masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
                                   calleeReg, &noUnderflow);
      masm.loadFunctionArgCount(calleeReg, scratch);
      masm.branch32(Assembler::AboveOrEqual, argcreg, scratch, &noUnderflow);
    }
    masm.branchIfFunctionHasJitEntry(calleeReg, ool->entry());
    masm.bind(&noUnderflow);
  }

  // Use scratch register to calculate stack space (including padding).
  masm.movePtr(argcreg, scratch);

  // Align the JitFrameLayout on the JitStackAlignment.
  if (JitStackValueAlignment > 1) {
    MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
               "Stack padding assumes that the frameSize is correct");
    MOZ_ASSERT(JitStackValueAlignment == 2);
    // If the number of arguments is odd, then we do not need any padding.
    //
    // Note: The |JitStackValueAlignment == 2| condition requires that the
    // overall number of values on the stack is even. When we have an odd number
    // of arguments, we don't need any padding, because the |thisValue| is
    // pushed after the arguments, so the overall number of values on the stack
    // is even.
    //
    // We can align by unconditionally setting the low bit. If the number of
    // arguments is odd, the low bit was already set, so this adds no padding.
    // If the number of arguments is even, the low bit was not set, so this adds
    // 1, as we require.
    masm.orPtr(Imm32(1), scratch);
  }

  // Reserve space for copying the arguments.
  NativeObject::elementsSizeMustNotOverflow();
  masm.lshiftPtr(Imm32(ValueShift), scratch);
  masm.subFromStackPtr(scratch);

#ifdef DEBUG
  // Put a magic value in the space reserved for padding. Note, this code cannot
  // be merged with the previous test, as not all architectures can write below
  // their stack pointers.
  if (JitStackValueAlignment > 1) {
    MOZ_ASSERT(JitStackValueAlignment == 2);
    Label noPaddingNeeded;
    // If the number of arguments is odd, then we do not need any padding.
    masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
    BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
    masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
    masm.bind(&noPaddingNeeded);
  }
#endif

  // The out-of-line underflow path rejoins here after allocating and filling
  // its own (larger) slot count.
  if (canUnderflow) {
    masm.bind(oolRejoin);
  }
}
   7248 
// Do not bailout after the execution of this function since the stack no longer
// corresponds to what is expected by the snapshots.
//
// Like emitAllocateSpaceForApply, but for constructing calls: pushes
// newTarget first (twice on 16-byte-aligned platforms, so one copy can act
// as padding), then reserves argument slots so that newTarget + args +
// |this| end up JitStackAlignment-aligned. Underflow slots are filled with
// |undefined| out-of-line. |newTargetAndScratch| is consumed as a scratch
// register; callers must not use it as newTarget afterwards.
template <typename T>
void CodeGenerator::emitAllocateSpaceForConstructAndPushNewTarget(
    T* construct, Register calleeReg, Register argcreg,
    Register newTargetAndScratch) {
  // Push newTarget.
  masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
  if (JitStackValueAlignment > 1) {
    // x86 is short on registers. To free up newTarget for use as a scratch
    // register before we know if we need padding, we push newTarget twice.
    // If the first copy pushed is correctly aligned, we will overwrite the
    // second. If the second copy is correctly aligned, the first is padding.
    masm.pushValue(JSVAL_TYPE_OBJECT, newTargetAndScratch);
  }
  Register scratch = newTargetAndScratch;

  Label* oolRejoin = nullptr;
  // Underflow (argc < nargs) is impossible when the single target is known
  // to take zero formal arguments.
  bool canUnderflow = !construct->hasSingleTarget() ||
                      construct->getSingleTarget()->nargs() > 0;
  if (canUnderflow) {
    auto* ool =
        new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
          // Align the JitFrameLayout on the JitStackAlignment by allocating
          // callee->nargs() slots, rounded down to the nearest odd number (see
          // below).  Leave callee->nargs() in `scratch` for the undef loop.
          if (construct->hasSingleTarget()) {
            uint32_t nargs = construct->getSingleTarget()->nargs();
            // ((nargs + 1) & ~1) - 1 rounds down to the nearest odd number;
            // note nargs > 0 here (see canUnderflow above).
            uint32_t numSlots =
                JitStackValueAlignment == 1 ? nargs : ((nargs + 1) & ~1) - 1;
            masm.subFromStackPtr(Imm32((numSlots) * sizeof(Value)));
            masm.move32(Imm32(nargs), scratch);
          } else {
            // `scratch` contains callee->nargs()
            if (JitStackValueAlignment > 1) {
              // Round down to nearest odd number.
              masm.addPtr(Imm32(1), scratch);
              masm.andPtr(Imm32(~1), scratch);
              masm.subPtr(Imm32(1), scratch);
            }
            masm.lshiftPtr(Imm32(ValueShift), scratch);
            masm.subFromStackPtr(scratch);

            // We need callee->nargs in `scratch`. If we rounded it down
            // above, we need to reload it. If we only shifted it, we can
            // simply shift it back.
            if (JitStackValueAlignment > 1) {
              masm.loadFunctionArgCount(calleeReg, scratch);
            } else {
              masm.rshiftPtr(Imm32(ValueShift), scratch);
            }
          }

          // Count from callee->nargs() down to argc, storing undefined values.
          Label loop;
          masm.bind(&loop);
          masm.sub32(Imm32(1), scratch);
          masm.storeValue(UndefinedValue(),
                          BaseValueIndex(masm.getStackPointer(), scratch));
          masm.branch32(Assembler::Above, scratch, argcreg, &loop);
          masm.jump(ool.rejoin());
        });
    addOutOfLineCode(ool, construct->mir());
    oolRejoin = ool->rejoin();

    // Take the out-of-line path only when argc < nargs AND the callee has a
    // JIT entry (otherwise the trampoline/VM handles underflow itself).
    Label noUnderflow;
    if (construct->hasSingleTarget()) {
      masm.branch32(Assembler::AboveOrEqual, argcreg,
                    Imm32(construct->getSingleTarget()->nargs()), &noUnderflow);
    } else {
      masm.branchTestObjIsFunction(Assembler::NotEqual, calleeReg, scratch,
                                   calleeReg, &noUnderflow);
      masm.loadFunctionArgCount(calleeReg, scratch);
      masm.branch32(Assembler::AboveOrEqual, argcreg, scratch, &noUnderflow);
    }
    masm.branchIfFunctionHasJitEntry(calleeReg, ool->entry());
    masm.bind(&noUnderflow);
  }

  // Use newTargetAndScratch to calculate stack space (including padding).
  masm.movePtr(argcreg, newTargetAndScratch);

  // Align the JitFrameLayout on the JitStackAlignment.
  if (JitStackValueAlignment > 1) {
    MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
               "Stack padding assumes that the frameSize is correct");
    MOZ_ASSERT(JitStackValueAlignment == 2);
    // Note: The |JitStackValueAlignment == 2| condition requires that the
    // overall number of values on the stack is even. We must push `newTarget`,
    // the args, and `this`. We've already pushed newTarget twice. Rounding
    // argc down to the closest odd number will give us the correct alignment:
    //
    //       argc:      *0*          *1*          *2*          *3*
    //  rounds to:       -1           1            1            3
    //              *newTarget   (newTarget)   newTarget   (newTarget)
    // curr sp -->   this         newTarget    arg1         newTarget
    //                           *arg0        *arg0         arg2
    //                            this         this         arg1
    //                                                     *arg0
    //                                                      this
    // The asterisk in each column marks the stack pointer after adding
    // the rounded value. In each case, pushing `this` will result in an
    // even number of total slots.
    masm.addPtr(Imm32(1), scratch);
    masm.andPtr(Imm32(~1), scratch);
    masm.subPtr(Imm32(1), scratch);
  }

  // Reserve space for copying the arguments.
  NativeObject::elementsSizeMustNotOverflow();
  masm.lshiftPtr(Imm32(ValueShift), newTargetAndScratch);
  masm.subFromStackPtr(newTargetAndScratch);

  // The out-of-line underflow path rejoins here after allocating and filling
  // its own (larger) slot count.
  if (canUnderflow) {
    masm.bind(oolRejoin);
  }
}
   7366 
// Destroys argvIndex and copyreg.
//
// Copies |argvIndex| Values from argvSrcBase+argvSrcOffset to
// sp+argvDstOffset, iterating from the highest index down to 0, one
// pointer-sized word at a time (two words per Value on 32-bit targets).
// Precondition: argvIndex > 0 (callers branch around this loop for zero
// arguments).
void CodeGenerator::emitCopyValuesForApply(Register argvSrcBase,
                                           Register argvIndex, Register copyreg,
                                           size_t argvSrcOffset,
                                           size_t argvDstOffset) {
  Label loop;
  masm.bind(&loop);

  // As argvIndex is off by 1, and we use the decBranchPtr instruction to loop
  // back, we have to subtract the size of the word which is copied.
  BaseValueIndex srcPtr(argvSrcBase, argvIndex,
                        int32_t(argvSrcOffset) - sizeof(void*));
  BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex,
                        int32_t(argvDstOffset) - sizeof(void*));
  masm.loadPtr(srcPtr, copyreg);
  masm.storePtr(copyreg, dstPtr);

  // Handle 32 bits architectures: a Value is two words, so also copy the
  // lower word.
  if (sizeof(Value) == 2 * sizeof(void*)) {
    BaseValueIndex srcPtrLow(argvSrcBase, argvIndex,
                             int32_t(argvSrcOffset) - 2 * sizeof(void*));
    BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
                             int32_t(argvDstOffset) - 2 * sizeof(void*));
    masm.loadPtr(srcPtrLow, copyreg);
    masm.storePtr(copyreg, dstPtrLow);
  }

  // Decrement argvIndex and loop while it is non-zero.
  masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
}
   7396 
// Recomputes the stack pointer as FramePointer - frameSize().
void CodeGenerator::emitRestoreStackPointerFromFP() {
  // This is used to restore the stack pointer after a call with a dynamic
  // number of arguments.

  MOZ_ASSERT(masm.framePushed() == frameSize());

  int32_t offset = -int32_t(frameSize());
  masm.computeEffectiveAddress(Address(FramePointer, offset),
                               masm.getStackPointer());
#if JS_CODEGEN_ARM64
  // ARM64 keeps a pseudo stack pointer; propagate the update to the real SP.
  masm.syncStackPtr();
#endif
}
   7410 
// Copies |argcreg| actual arguments of the current frame (skipping the first
// |extraFormals| of them) into the stack space previously reserved by
// emitAllocateSpaceForApply/...Construct. |scratch| and |copyreg| are
// clobbered; |argcreg| is preserved.
void CodeGenerator::emitPushArguments(Register argcreg, Register scratch,
                                      Register copyreg, uint32_t extraFormals) {
  Label end;

  // Skip the copy of arguments if there are none.
  masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);

  // clang-format off
  //
  // We are making a copy of the arguments which are above the JitFrameLayout
  // of the current Ion frame.
  //
  // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
  //
  // clang-format on

  // Compute the source and destination offsets into the stack.
  //
  // The |extraFormals| parameter is used when copying rest-parameters and
  // allows to skip the initial parameters before the actual rest-parameters.
  Register argvSrcBase = FramePointer;
  size_t argvSrcOffset =
      JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
  size_t argvDstOffset = 0;

  Register argvIndex = scratch;
  masm.move32(argcreg, argvIndex);

  // Copy arguments.
  emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset,
                         argvDstOffset);

  // Join with all arguments copied.
  masm.bind(&end);
}
   7446 
// Argument pusher for fun.apply(thisArg, arguments-of-current-frame):
// reserves space, copies the current frame's actual args, then pushes |this|.
void CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply) {
  // Holds the function nargs.
  Register funcreg = ToRegister(apply->getFunction());
  Register argcreg = ToRegister(apply->getArgc());
  Register copyreg = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());
  uint32_t extraFormals = apply->numExtraFormals();

  // Allocate space on the stack for arguments.
  emitAllocateSpaceForApply(apply, funcreg, argcreg, scratch);

  emitPushArguments(argcreg, scratch, copyreg, extraFormals);

  // Push |this|.
  masm.pushValue(ToValue(apply->thisValue()));
}
   7463 
// Argument pusher for fun.apply(thisArg, argumentsObject): reads argc and
// the argument vector out of the ArgumentsObject's data, copies the values,
// then pushes |this|. Note the register aliasing below: argsObj doubles as
// the argc output register.
void CodeGenerator::emitPushArguments(LApplyArgsObj* apply) {
  Register function = ToRegister(apply->getFunction());
  Register argsObj = ToRegister(apply->getArgsObj());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // argc and argsObj are mapped to the same calltemp register.
  MOZ_ASSERT(argsObj == ToRegister(apply->getArgc()));

  // Load argc into tmpArgc.
  masm.loadArgumentsObjectLength(argsObj, tmpArgc);

  // Allocate space on the stack for arguments.
  emitAllocateSpaceForApply(apply, function, tmpArgc, scratch);

  // Load arguments data.
  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   argsObj);
  size_t argsSrcOffset = ArgumentsData::offsetOfArgs();

  // This is the end of the lifetime of argsObj.
  // After this call, the argsObj register holds the argument count instead.
  emitPushArrayAsArguments(tmpArgc, argsObj, scratch, argsSrcOffset);

  // Push |this|.
  masm.pushValue(ToValue(apply->thisValue()));
}
   7491 
// Copies |tmpArgc| Values from |srcBaseAndArgc|+|argvSrcOffset| into the
// pre-reserved argument area, then leaves the argument count in
// |srcBaseAndArgc| (the base-pointer role of that register ends here).
void CodeGenerator::emitPushArrayAsArguments(Register tmpArgc,
                                             Register srcBaseAndArgc,
                                             Register scratch,
                                             size_t argvSrcOffset) {
  // Preconditions:
  // 1. |tmpArgc| * sizeof(Value) bytes have been allocated at the top of
  //    the stack to hold arguments.
  // 2. |srcBaseAndArgc| + |srcOffset| points to an array of |tmpArgc| values.
  //
  // Postconditions:
  // 1. The arguments at |srcBaseAndArgc| + |srcOffset| have been copied into
  //    the allocated space.
  // 2. |srcBaseAndArgc| now contains the original value of |tmpArgc|.
  //
  // |scratch| is used as a temp register within this function and clobbered.

  Label noCopy, epilogue;

  // Skip the copy of arguments if there are none.
  masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
  {
    // Copy the values. This code is skipped entirely if there are no values.
    size_t argvDstOffset = 0;

    Register argvSrcBase = srcBaseAndArgc;

    // Stash away |tmpArgc| and adjust argvDstOffset accordingly: the pushed
    // word sits between the stack pointer and the destination slots.
    masm.push(tmpArgc);
    Register argvIndex = tmpArgc;
    argvDstOffset += sizeof(void*);

    // Copy
    emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
                           argvDstOffset);

    // Restore.
    masm.pop(srcBaseAndArgc);  // srcBaseAndArgc now contains argc.
    masm.jump(&epilogue);
  }
  masm.bind(&noCopy);
  {
    // Clear argc if we skipped the copy step.
    masm.movePtr(ImmWord(0), srcBaseAndArgc);
  }

  // Join with all arguments copied.
  // Note, "srcBase" has become "argc".
  masm.bind(&epilogue);
}
   7541 
// Argument pusher for fun.apply(thisArg, array): uses the array's length as
// argc, copies its elements, then pushes |this|. Note the register aliasing:
// elements doubles as the argc output register.
void CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply) {
  Register function = ToRegister(apply->getFunction());
  Register elements = ToRegister(apply->getElements());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // argc and elements are mapped to the same calltemp register.
  MOZ_ASSERT(elements == ToRegister(apply->getArgc()));

  // Invariants guarded in the caller:
  //  - the array is not too long
  //  - the array length equals its initialized length

  // The array length is our argc for the purposes of allocating space.
  masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);

  // Allocate space for the values.
  emitAllocateSpaceForApply(apply, function, tmpArgc, scratch);

  // After this call "elements" has become "argc".
  size_t elementsOffset = 0;
  emitPushArrayAsArguments(tmpArgc, elements, scratch, elementsOffset);

  // Push |this|.
  masm.pushValue(ToValue(apply->thisValue()));
}
   7568 
// Argument pusher for Reflect.construct-style calls with the current frame's
// actual arguments: pushes newTarget, reserves space, copies the args, then
// pushes |this|. Note the register aliasing: newTarget doubles as scratch.
void CodeGenerator::emitPushArguments(LConstructArgsGeneric* construct) {
  // Holds the function nargs.
  Register argcreg = ToRegister(construct->getArgc());
  Register function = ToRegister(construct->getFunction());
  Register copyreg = ToRegister(construct->getTempObject());
  Register scratch = ToRegister(construct->getTempForArgCopy());
  uint32_t extraFormals = construct->numExtraFormals();

  // newTarget and scratch are mapped to the same calltemp register.
  MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));

  // Allocate space for the values.
  // After this call "newTarget" has become "scratch".
  emitAllocateSpaceForConstructAndPushNewTarget(construct, function, argcreg,
                                                scratch);

  emitPushArguments(argcreg, scratch, copyreg, extraFormals);

  // Push |this|.
  masm.pushValue(ToValue(construct->thisValue()));
}
   7590 
// Argument pusher for constructing calls whose arguments come from an array:
// pushes newTarget, uses the array length as argc, copies the elements, then
// pushes |this|. Note the register aliasing: elements doubles as argc, and
// newTarget doubles as scratch.
void CodeGenerator::emitPushArguments(LConstructArrayGeneric* construct) {
  Register function = ToRegister(construct->getFunction());
  Register elements = ToRegister(construct->getElements());
  Register tmpArgc = ToRegister(construct->getTempObject());
  Register scratch = ToRegister(construct->getTempForArgCopy());

  // argc and elements are mapped to the same calltemp register.
  MOZ_ASSERT(elements == ToRegister(construct->getArgc()));

  // newTarget and scratch are mapped to the same calltemp register.
  MOZ_ASSERT(scratch == ToRegister(construct->getNewTarget()));

  // Invariants guarded in the caller:
  //  - the array is not too long
  //  - the array length equals its initialized length

  // The array length is our argc for the purposes of allocating space.
  masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);

  // Allocate space for the values.
  // After this call "newTarget" has become "scratch".
  emitAllocateSpaceForConstructAndPushNewTarget(construct, function, tmpArgc,
                                                scratch);

  // After this call "elements" has become "argc".
  size_t elementsOffset = 0;
  emitPushArrayAsArguments(tmpArgc, elements, scratch, elementsOffset);

  // Push |this|.
  masm.pushValue(ToValue(construct->thisValue()));
}
   7622 
// Emits the generic fun.apply/fun.call/Reflect.construct call sequence shared
// by all LApply*/LConstruct* generic LIR instructions: pushes the arguments
// (via the per-instruction emitPushArguments overload), then either performs a
// fast Ion-to-Ion JIT call or falls back to a VM invoke for uncompiled/native
// callees. For constructing calls, a primitive return value is replaced with
// the |this| object created by CreateThis, per spec.
template <typename T>
void CodeGenerator::emitApplyGeneric(T* apply) {
  // Holds the function object.
  Register calleereg = ToRegister(apply->getFunction());

  // Temporary register for modifying the function object.
  Register objreg = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // Holds the function nargs, computed in the invoker or (for ApplyArray,
  // ConstructArray, or ApplyArgsObj) in the argument pusher.
  Register argcreg = ToRegister(apply->getArgc());

  // Copy the arguments of the current function.
  //
  // In the case of ApplyArray, ConstructArray, or ApplyArgsObj, also compute
  // argc. The argc register and the elements/argsObj register are the same;
  // argc must not be referenced before the call to emitPushArguments() and
  // elements/argsObj must not be referenced after it returns.
  //
  // In the case of ConstructArray or ConstructArgs, also overwrite newTarget;
  // newTarget must not be referenced after this point.
  //
  // objreg is dead across this call.
  emitPushArguments(apply);

  masm.checkStackAlignment();

  bool constructing = apply->mir()->isConstructing();

  // If the function is native, the call is compiled through emitApplyNative.
  MOZ_ASSERT_IF(apply->hasSingleTarget(),
                !apply->getSingleTarget()->isNativeWithoutJitEntry());

  Label end, invoke;

  // Unless already known, guard that calleereg is actually a function object.
  if (!apply->hasSingleTarget()) {
    masm.branchTestObjIsFunction(Assembler::NotEqual, calleereg, objreg,
                                 calleereg, &invoke);
  }

  // Guard that calleereg is an interpreted function with a JSScript.
  masm.branchIfFunctionHasNoJitEntry(calleereg, &invoke);

  // Guard that callee allows the [[Call]] or [[Construct]] operation required.
  if (constructing) {
    masm.branchTestFunctionFlags(calleereg, FunctionFlags::CONSTRUCTOR,
                                 Assembler::Zero, &invoke);
  } else {
    // Class constructors cannot be [[Call]]ed; take the slow path which
    // reports the proper error.
    masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
                            calleereg, objreg, &invoke);
  }

  // Use the slow path if CreateThis was unable to create the |this| object.
  if (constructing) {
    // |this| was pushed last by emitPushArguments, so it sits at the top of
    // the stack.
    Address thisAddr(masm.getStackPointer(), 0);
    masm.branchTestNull(Assembler::Equal, thisAddr, &invoke);
  }

  // Call with an Ion frame
  {
    if (apply->mir()->maybeCrossRealm()) {
      masm.switchToObjectRealm(calleereg, objreg);
    }

    // Knowing that calleereg is a non-native function, load jitcode.
    masm.loadJitCodeRaw(calleereg, objreg);

    masm.PushCalleeToken(calleereg, constructing);
    masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcreg, scratch);

    // Call the function.
    ensureOsiSpace();
    uint32_t callOffset = masm.callJit(objreg);
    markSafepointAt(callOffset, apply);

    if (apply->mir()->maybeCrossRealm()) {
      static_assert(!JSReturnOperand.aliases(ReturnReg),
                    "ReturnReg available as scratch after scripted calls");
      masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
    }

    // Discard JitFrameLayout fields still left on the stack.
    masm.freeStack(sizeof(JitFrameLayout) -
                   JitFrameLayout::bytesPoppedAfterCall());
    masm.jump(&end);
  }

  // Handle uncompiled or native functions.
  {
    masm.bind(&invoke);
    emitCallInvokeFunction(apply);
  }

  masm.bind(&end);

  // If the return value of the constructing function is Primitive, replace the
  // return value with the Object from CreateThis.
  if (constructing) {
    Label notPrimitive;
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.loadValue(Address(masm.getStackPointer(), 0), JSReturnOperand);

#ifdef DEBUG
    // The value loaded from the stack must itself be an object.
    masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
                             &notPrimitive);
    masm.assumeUnreachable("CreateThis creates an object");
#endif

    masm.bind(&notPrimitive);
  }

  // Pop arguments and continue.
  emitRestoreStackPointerFromFP();
}
   7740 
// Pads the stack with one poison Value, if needed, so that after pushing
// |argc| arguments (plus newTarget and |this| for constructing calls, or just
// |this| otherwise) the stack is aligned on JitStackAlignment. |argc| is only
// read, not modified.
template <typename T>
void CodeGenerator::emitAlignStackForApplyNative(T* apply, Register argc) {
  static_assert(JitStackAlignment % ABIStackAlignment == 0,
                "aligning on JIT stack subsumes ABI alignment");

  // Align the arguments on the JitStackAlignment.
  if (JitStackValueAlignment > 1) {
    MOZ_ASSERT(JitStackValueAlignment == 2,
               "Stack padding adds exactly one Value");
    MOZ_ASSERT(frameSize() % JitStackValueAlignment == 0,
               "Stack padding assumes that the frameSize is correct");

    Assembler::Condition cond;
    if constexpr (T::isConstructing()) {
      // If the number of arguments is even, then we do not need any padding.
      // (Constructing calls additionally push newTarget and |this|, i.e.
      // argc + 2 Values in total, which is even exactly when argc is even.)
      //
      // Also see emitAllocateSpaceForConstructAndPushNewTarget().
      cond = Assembler::Zero;
    } else {
      // If the number of arguments is odd, then we do not need any padding.
      // (Non-constructing calls push only |this| in addition, i.e. argc + 1
      // Values in total, which is even exactly when argc is odd.)
      //
      // Also see emitAllocateSpaceForApply().
      cond = Assembler::NonZero;
    }

    Label noPaddingNeeded;
    masm.branchTestPtr(cond, argc, Imm32(1), &noPaddingNeeded);
    masm.pushValue(MagicValue(JS_ARG_POISON));
    masm.bind(&noPaddingNeeded);
  }
}
   7772 
// Pushes the arguments for a native apply/construct call where the arguments
// are the extra formals of the current frame: aligns the stack, pushes
// newTarget (constructing only), copies argc Values from the caller's actual
// arguments, then pushes |this| (or the JS_IS_CONSTRUCTING magic value).
template <typename T>
void CodeGenerator::emitPushNativeArguments(T* apply) {
  Register argc = ToRegister(apply->getArgc());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());
  uint32_t extraFormals = apply->numExtraFormals();

  // Align stack.
  emitAlignStackForApplyNative(apply, argc);

  // Push newTarget.
  if constexpr (T::isConstructing()) {
    masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
  }

  // Push arguments.
  Label noCopy;
  masm.branchTestPtr(Assembler::Zero, argc, argc, &noCopy);
  {
    // Use scratch register to calculate stack space.
    masm.movePtr(argc, scratch);

    // Reserve space for copying the arguments.
    NativeObject::elementsSizeMustNotOverflow();
    masm.lshiftPtr(Imm32(ValueShift), scratch);
    masm.subFromStackPtr(scratch);

    // Compute the source and destination offsets into the stack.
    // Source: the actual arguments of the current frame, skipping any formals
    // already consumed by the apply/construct itself.
    Register argvSrcBase = FramePointer;
    size_t argvSrcOffset =
        JitFrameLayout::offsetOfActualArgs() + extraFormals * sizeof(JS::Value);
    size_t argvDstOffset = 0;

    // tmpArgc serves as the copy loop counter; argc itself stays intact.
    Register argvIndex = tmpArgc;
    masm.move32(argc, argvIndex);

    // Copy arguments.
    emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
                           argvDstOffset);
  }
  masm.bind(&noCopy);

  // Push |this|.
  if constexpr (T::isConstructing()) {
    masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
  } else {
    masm.pushValue(ToValue(apply->thisValue()));
  }
}
   7822 
// Pushes the arguments for a native apply/construct call where the arguments
// come from a dense array's elements: aligns the stack, pushes newTarget
// (constructing only), pushes the elements in reverse (so they end up in call
// order on a downward-growing stack), sets argc, then pushes |this|.
template <typename T>
void CodeGenerator::emitPushArrayAsNativeArguments(T* apply) {
  Register argc = ToRegister(apply->getArgc());
  Register elements = ToRegister(apply->getElements());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());

  // NB: argc and elements are mapped to the same register.
  MOZ_ASSERT(argc == elements);

  // Invariants guarded in the caller:
  //  - the array is not too long
  //  - the array length equals its initialized length

  // The array length is our argc.
  masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);

  // Align stack.
  emitAlignStackForApplyNative(apply, tmpArgc);

  // Push newTarget.
  if constexpr (T::isConstructing()) {
    masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
  }

  // Skip the copy of arguments if there are none.
  Label noCopy;
  masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
  {
    // |tmpArgc| is off-by-one, so adjust the offset accordingly.
    BaseObjectElementIndex srcPtr(elements, tmpArgc,
                                  -int32_t(sizeof(JS::Value)));

    // Push elements from last to first; tmpArgc counts down to zero.
    Label loop;
    masm.bind(&loop);
    masm.pushValue(srcPtr, scratch);
    masm.decBranchPtr(Assembler::NonZero, tmpArgc, Imm32(1), &loop);
  }
  masm.bind(&noCopy);

  // Set argc in preparation for calling the native function.
  // (Reload from the elements header because tmpArgc was consumed by the
  // loop; this also turns the shared elements/argc register into argc.)
  masm.load32(Address(elements, ObjectElements::offsetOfLength()), argc);

  // Push |this|.
  if constexpr (T::isConstructing()) {
    masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
  } else {
    masm.pushValue(ToValue(apply->thisValue()));
  }
}
   7873 
// Native apply with frame arguments: delegate to the shared helper.
void CodeGenerator::emitPushArguments(LApplyArgsNative* apply) {
  emitPushNativeArguments(apply);
}
   7877 
// Native apply with array arguments: delegate to the shared helper.
void CodeGenerator::emitPushArguments(LApplyArrayNative* apply) {
  emitPushArrayAsNativeArguments(apply);
}
   7881 
// Native construct with frame arguments: delegate to the shared helper.
void CodeGenerator::emitPushArguments(LConstructArgsNative* construct) {
  emitPushNativeArguments(construct);
}
   7885 
// Native construct with array arguments: delegate to the shared helper.
void CodeGenerator::emitPushArguments(LConstructArrayNative* construct) {
  emitPushArrayAsNativeArguments(construct);
}
   7889 
// Pushes the arguments for a native apply call whose arguments come from an
// ArgumentsObject: loads its length as argc, aligns the stack, copies the
// values out of the arguments data, then pushes |this|. The length was
// already range-checked by emitApplyArgsObjGuard, so no bail label is needed
// here.
void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
  Register argc = ToRegister(apply->getArgc());
  Register argsObj = ToRegister(apply->getArgsObj());
  Register tmpArgc = ToRegister(apply->getTempObject());
  Register scratch = ToRegister(apply->getTempForArgCopy());
  Register scratch2 = ToRegister(apply->getTempExtra());

  // NB: argc and argsObj are mapped to the same register.
  MOZ_ASSERT(argc == argsObj);

  // Load argc into tmpArgc.
  masm.loadArgumentsObjectLength(argsObj, tmpArgc);

  // Align stack.
  emitAlignStackForApplyNative(apply, tmpArgc);

  // Push arguments.
  Label noCopy, epilogue;
  masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
  {
    // Use scratch register to calculate stack space.
    masm.movePtr(tmpArgc, scratch);

    // Reserve space for copying the arguments.
    NativeObject::elementsSizeMustNotOverflow();
    masm.lshiftPtr(Imm32(ValueShift), scratch);
    masm.subFromStackPtr(scratch);

    // Load arguments data.
    // argsObj is clobbered here to hold the arguments data pointer; it is
    // rematerialized as argc below via tmpArgc.
    Register argvSrcBase = argsObj;
    masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                     argvSrcBase);
    size_t argvSrcOffset = ArgumentsData::offsetOfArgs();
    size_t argvDstOffset = 0;

    Register argvIndex = scratch2;
    masm.move32(tmpArgc, argvIndex);

    // Copy the values.
    emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
                           argvDstOffset);
  }
  masm.bind(&noCopy);

  // Set argc in preparation for calling the native function.
  masm.movePtr(tmpArgc, argc);

  // Push |this|.
  masm.pushValue(ToValue(apply->thisValue()));
}
   7940 
// Emits an apply/construct call to a known native function: pushes the
// arguments via the per-instruction emitPushArguments overload, then builds a
// native exit frame and invokes the native through callWithABI (via
// emitCallNative). Uses the ignores-return-value variant of the native when
// the call result is unused and the JSJitInfo provides one.
template <typename T>
void CodeGenerator::emitApplyNative(T* apply) {
  MOZ_ASSERT(T::isConstructing() == apply->mir()->isConstructing(),
             "isConstructing condition must be consistent");

  WrappedFunction* target = apply->mir()->getSingleTarget();
  MOZ_ASSERT(target->isNativeWithoutJitEntry());

  JSNative native = target->native();
  if (apply->mir()->ignoresReturnValue() && target->hasJitInfo()) {
    const JSJitInfo* jitInfo = target->jitInfo();
    if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
      native = jitInfo->ignoresReturnValueMethod;
    }
  }

  // Push arguments, including newTarget and |this|.
  emitPushArguments(apply);

  // Registers used for callWithABI() argument-passing.
  Register argContextReg = ToRegister(apply->getTempObject());
  Register argUintNReg = ToRegister(apply->getArgc());
  Register argVpReg = ToRegister(apply->getTempForArgCopy());
  Register tempReg = ToRegister(apply->getTempExtra());

  // No unused stack for variadic calls.
  uint32_t unusedStack = 0;

  // Pushed arguments don't change the pushed frames amount.
  MOZ_ASSERT(masm.framePushed() == frameSize());

  // Create the exit frame and call the native.
  emitCallNative(apply, native, argContextReg, argUintNReg, argVpReg, tempReg,
                 unusedStack);

  // The exit frame is still on the stack.
  MOZ_ASSERT(masm.framePushed() == frameSize() + NativeExitFrameLayout::Size());

  // The next instruction is removing the exit frame, so there is no need for
  // leaveFakeExitFrame.

  // Pop arguments and continue.
  masm.setFramePushed(frameSize());
  emitRestoreStackPointerFromFP();
}
   7986 
// Bails out to Baseline if the argument count exceeds JIT_ARGS_LENGTH_MAX.
template <typename T>
void CodeGenerator::emitApplyArgsGuard(T* apply) {
  LSnapshot* snapshot = apply->snapshot();
  Register argcreg = ToRegister(apply->getArgc());

  // Ensure that we have a reasonable number of arguments.
  bailoutCmp32(Assembler::Above, argcreg, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);
}
   7995 
// Bails out if the ArgumentsObject's length can't be loaded (the overload
// with a fail label branches for e.g. overridden/forwarded arguments) or if
// it exceeds JIT_ARGS_LENGTH_MAX.
template <typename T>
void CodeGenerator::emitApplyArgsObjGuard(T* apply) {
  Register argsObj = ToRegister(apply->getArgsObj());
  Register temp = ToRegister(apply->getTempObject());

  Label bail;
  masm.loadArgumentsObjectLength(argsObj, temp, &bail);
  masm.branch32(Assembler::Above, temp, Imm32(JIT_ARGS_LENGTH_MAX), &bail);
  bailoutFrom(&bail, apply->snapshot());
}
   8006 
// Bails out if the argument array is too long or has an uninitialized tail
// (length != initializedLength), establishing the invariants the argument
// pushers rely on.
template <typename T>
void CodeGenerator::emitApplyArrayGuard(T* apply) {
  LSnapshot* snapshot = apply->snapshot();
  Register elements = ToRegister(apply->getElements());
  Register tmp = ToRegister(apply->getTempObject());

  Address length(elements, ObjectElements::offsetOfLength());
  masm.load32(length, tmp);

  // Ensure that we have a reasonable number of arguments.
  bailoutCmp32(Assembler::Above, tmp, Imm32(JIT_ARGS_LENGTH_MAX), snapshot);

  // Ensure that the array does not contain an uninitialized tail.
  // (tmp = length - initializedLength must be zero.)

  Address initializedLength(elements,
                            ObjectElements::offsetOfInitializedLength());
  masm.sub32(initializedLength, tmp);
  bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);
}
   8026 
// Guard argc, then emit the generic apply call.
void CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply) {
  emitApplyArgsGuard(apply);
  emitApplyGeneric(apply);
}
   8031 
// Guard the ArgumentsObject length, then emit the generic apply call.
void CodeGenerator::visitApplyArgsObj(LApplyArgsObj* apply) {
  emitApplyArgsObjGuard(apply);
  emitApplyGeneric(apply);
}
   8036 
// Guard the argument array, then emit the generic apply call.
void CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply) {
  emitApplyArrayGuard(apply);
  emitApplyGeneric(apply);
}
   8041 
// Guard argc, then emit the generic construct call.
void CodeGenerator::visitConstructArgsGeneric(LConstructArgsGeneric* lir) {
  emitApplyArgsGuard(lir);
  emitApplyGeneric(lir);
}
   8046 
// Guard the argument array, then emit the generic construct call.
void CodeGenerator::visitConstructArrayGeneric(LConstructArrayGeneric* lir) {
  emitApplyArrayGuard(lir);
  emitApplyGeneric(lir);
}
   8051 
// Guard argc, then emit the native apply call.
void CodeGenerator::visitApplyArgsNative(LApplyArgsNative* lir) {
  emitApplyArgsGuard(lir);
  emitApplyNative(lir);
}
   8056 
// Guard the ArgumentsObject length, then emit the native apply call.
void CodeGenerator::visitApplyArgsObjNative(LApplyArgsObjNative* lir) {
  emitApplyArgsObjGuard(lir);
  emitApplyNative(lir);
}
   8061 
// Guard the argument array, then emit the native apply call.
void CodeGenerator::visitApplyArrayNative(LApplyArrayNative* lir) {
  emitApplyArrayGuard(lir);
  emitApplyNative(lir);
}
   8066 
// Guard argc, then emit the native construct call.
void CodeGenerator::visitConstructArgsNative(LConstructArgsNative* lir) {
  emitApplyArgsGuard(lir);
  emitApplyNative(lir);
}
   8071 
// Guard the argument array, then emit the native construct call.
void CodeGenerator::visitConstructArrayNative(LConstructArrayNative* lir) {
  emitApplyArrayGuard(lir);
  emitApplyNative(lir);
}
   8076 
// Unconditionally bail out to Baseline using this instruction's snapshot.
void CodeGenerator::visitBail(LBail* lir) { bailout(lir->snapshot()); }
   8078 
// Marks a block end the compiler proved unreachable; crashes (in debug) if
// control ever gets here.
void CodeGenerator::visitUnreachable(LUnreachable* lir) {
  masm.assumeUnreachable("end-of-block assumed unreachable");
}
   8082 
// Encodes the instruction's snapshot into the snapshot buffer; emits no code.
void CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir) {
  encode(lir->snapshot());
}
   8086 
// Boxed-Value result placeholder for an unreachable instruction.
void CodeGenerator::visitUnreachableResultV(LUnreachableResultV* lir) {
  masm.assumeUnreachable("must be unreachable");
}
   8090 
// Typed result placeholder for an unreachable instruction.
void CodeGenerator::visitUnreachableResultT(LUnreachableResultT* lir) {
  masm.assumeUnreachable("must be unreachable");
}
   8094 
// Emits the stack-overflow (over-recursion) check at function entry:
// compares the stack pointer against the runtime's JIT stack limit and, on
// failure, calls the CheckOverRecursed VM function out-of-line to report the
// error.
void CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir) {
  // If we don't push anything on the stack, skip the check.
  if (omitOverRecursedStackCheck()) {
    return;
  }

  // Ensure that this frame will not cross the stack limit.
  // This is a weak check, justified by Ion using the C stack: we must always
  // be some distance away from the actual limit, since if the limit is
  // crossed, an error must be thrown, which requires more frames.
  //
  // It must always be possible to trespass past the stack limit.
  // Ion may legally place frames very close to the limit. Calling additional
  // C functions may then violate the limit without any checking.
  //
  // Since Ion frames exist on the C stack, the stack limit may be
  // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    // The OOL path is hit if the recursion depth has been exceeded.
    // Throw an InternalError for over-recursion.

    // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
    // to save all live registers to avoid crashes if CheckOverRecursed triggers
    // a GC.
    saveLive(lir);

    using Fn = bool (*)(JSContext*);
    callVM<Fn, CheckOverRecursed>(lir);

    restoreLive(lir);
    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  // Conditional forward (unlikely) branch to failure.
  const void* limitAddr = gen->runtime->addressOfJitStackLimit();
  masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
                         ool->entry());
  masm.bind(ool->rejoin());
}
   8136 
// Creates and returns an IonScriptCounts structure describing every basic
// block of the graph (id, bytecode offset in the outermost script, successor
// ids, and a "file:line" description for inlined blocks), or nullptr when
// profiling is off, when compiling wasm, or on OOM. The result is stored in
// scriptCounts_ and later attached to the JSScript.
IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() {
  // If scripts are being profiled, create a new IonScriptCounts for the
  // profiling data, which will be attached to the associated JSScript or
  // wasm module after code generation finishes.
  if (!gen->hasProfilingScripts()) {
    return nullptr;
  }

  // This test inhibits IonScriptCount creation for wasm code which is
  // currently incompatible with wasm codegen for two reasons: (1) wasm code
  // must be serializable and script count codegen bakes in absolute
  // addresses, (2) wasm code does not have a JSScript with which to associate
  // code coverage data.
  JSScript* script = gen->outerInfo().script();
  if (!script) {
    return nullptr;
  }

  auto counts = MakeUnique<IonScriptCounts>();
  if (!counts || !counts->init(graph.numBlocks())) {
    return nullptr;
  }

  for (size_t i = 0; i < graph.numBlocks(); i++) {
    MBasicBlock* block = graph.getBlock(i)->mir();

    uint32_t offset = 0;
    char* description = nullptr;
    if (MResumePoint* resume = block->entryResumePoint()) {
      // Find a PC offset in the outermost script to use. If this
      // block is from an inlined script, find a location in the
      // outer script to associate information about the inlining
      // with.
      while (resume->caller()) {
        resume = resume->caller();
      }
      offset = script->pcToOffset(resume->pc());

      if (block->entryResumePoint()->caller()) {
        // Get the filename and line number of the inner script.
        // Allocation failure here is tolerated; the description is optional.
        JSScript* innerScript = block->info().script();
        description = js_pod_calloc<char>(200);
        if (description) {
          snprintf(description, 200, "%s:%u", innerScript->filename(),
                   innerScript->lineno());
        }
      }
    }

    if (!counts->block(i).init(block->id(), offset, description,
                               block->numSuccessors())) {
      return nullptr;
    }

    for (size_t j = 0; j < block->numSuccessors(); j++) {
      counts->block(i).setSuccessor(
          j, skipTrivialBlocks(block->getSuccessor(j))->id());
    }
  }

  scriptCounts_ = counts.release();
  return scriptCounts_;
}
   8200 
// Structure for managing the state tracked for a block by script counters.
// RAII helper: init() bumps the block's hit counter and redirects the
// MacroAssembler's printer so generated assembly is captured as text; the
// destructor detaches the printer and stores the captured text on the block.
struct ScriptCountBlockState {
  // The per-block counters being filled in.
  IonBlockCounts& block;
  // The assembler whose output is being captured.
  MacroAssembler& masm;

  // Accumulates human-readable assembly for this block.
  Sprinter printer;

 public:
  ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm)
      : block(*block), masm(*masm), printer(GetJitContext()->cx, false) {}

  // Returns false on OOM. On success, attaches the printer to the assembler;
  // the destructor is responsible for detaching it.
  bool init() {
    if (!printer.init()) {
      return false;
    }

    // Bump the hit count for the block at the start. This code is not
    // included in either the text for the block or the instruction byte
    // counts.
    masm.inc64(AbsoluteAddress(block.addressOfHitCount()));

    // Collect human readable assembly for the code generated in the block.
    masm.setPrinter(&printer);

    return true;
  }

  // Called for each LIR instruction in the block to label its assembly.
  void visitInstruction(LInstruction* ins) {
#ifdef JS_JITSPEW
    // Prefix stream of assembly instructions with their LIR instruction
    // name and any associated high level info.
    if (const char* extra = ins->getExtraName()) {
      printer.printf("[%s:%s]\n", ins->opName(), extra);
    } else {
      printer.printf("[%s]\n", ins->opName());
    }
#endif
  }

  ~ScriptCountBlockState() {
    masm.setPrinter(nullptr);

    if (JS::UniqueChars str = printer.release()) {
      block.setCode(str.get());
    }
  }
};
   8248 
// Branches to |invalidated| if this IonScript has been invalidated. The
// IonScript pointer is unknown at codegen time, so a patchable move is
// recorded in ionScriptLabels_ and patched in at link time.
void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) {
  CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp);
  masm.propagateOOM(ionScriptLabels_.append(label));

  // If IonScript::invalidationCount_ != 0, the script has been invalidated.
  masm.branch32(Assembler::NotEqual,
                Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0),
                invalidated);
}
   8258 
   8259 #ifdef DEBUG
// Debug-only: verifies that |input| holds a valid GC thing pointer of the
// MIR-declared type (Object/String/Symbol/BigInt) by calling the matching
// AssertValid*Ptr C++ helper. Skipped when the script has been invalidated,
// in wasm compilations, and in simulator builds.
void CodeGenerator::emitAssertGCThingResult(Register input,
                                            const MDefinition* mir) {
  MIRType type = mir->type();
  MOZ_ASSERT(type == MIRType::Object || type == MIRType::String ||
             type == MIRType::Symbol || type == MIRType::BigInt);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);

  Register temp = regs.takeAny();
  masm.push(temp);

  // Don't check if the script has been invalidated. In that case invalid
  // types are expected (until we reach the OsiPoint and bailout).
  Label done;
  branchIfInvalidated(temp, &done);

#  ifndef JS_SIMULATOR
  // Check that we have a valid GC pointer.
  // Disable for wasm because we don't have a context on wasm compilation
  // threads and this needs a context.
  // Also disable for simulator builds because the C++ call is a lot slower
  // there than on actual hardware.
  if (JitOptions.fullDebugChecks && !IsCompilingWasm()) {
    saveVolatile();
    masm.setupUnalignedABICall(temp);
    masm.loadJSContext(temp);
    masm.passABIArg(temp);
    masm.passABIArg(input);

    // Dispatch to the assertion helper matching the MIR type.
    switch (type) {
      case MIRType::Object: {
        using Fn = void (*)(JSContext* cx, JSObject* obj);
        masm.callWithABI<Fn, AssertValidObjectPtr>();
        break;
      }
      case MIRType::String: {
        using Fn = void (*)(JSContext* cx, JSString* str);
        masm.callWithABI<Fn, AssertValidStringPtr>();
        break;
      }
      case MIRType::Symbol: {
        using Fn = void (*)(JSContext* cx, JS::Symbol* sym);
        masm.callWithABI<Fn, AssertValidSymbolPtr>();
        break;
      }
      case MIRType::BigInt: {
        using Fn = void (*)(JSContext* cx, JS::BigInt* bi);
        masm.callWithABI<Fn, AssertValidBigIntPtr>();
        break;
      }
      default:
        MOZ_CRASH();
    }

    restoreVolatile();
  }
#  endif

  masm.bind(&done);
  masm.pop(temp);
}
   8322 
// Debug-only: verifies that |input| holds a well-formed boxed Value by
// spilling it to the stack and calling AssertValidValue with its address.
// Skipped when the script has been invalidated.
void CodeGenerator::emitAssertResultV(const ValueOperand input,
                                      const MDefinition* mir) {
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(input);

  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();
  masm.push(temp1);
  masm.push(temp2);

  // Don't check if the script has been invalidated. In that case invalid
  // types are expected (until we reach the OsiPoint and bailout).
  Label done;
  branchIfInvalidated(temp1, &done);

  // Check that we have a valid GC pointer.
  if (JitOptions.fullDebugChecks) {
    saveVolatile();

    // Spill the Value so we can pass its stack address to the C++ helper.
    masm.pushValue(input);
    masm.moveStackPtrTo(temp1);

    using Fn = void (*)(JSContext* cx, Value* v);
    masm.setupUnalignedABICall(temp2);
    masm.loadJSContext(temp2);
    masm.passABIArg(temp2);
    masm.passABIArg(temp1);
    masm.callWithABI<Fn, AssertValidValue>();
    masm.popValue(input);
    restoreVolatile();
  }

  masm.bind(&done);
  masm.pop(temp2);
  masm.pop(temp1);
}
   8359 
// Debug-only: emits a GC-thing validity check on the single register
// definition of |lir|, if any; skips instructions with no defs or a bogus
// temp def.
void CodeGenerator::emitGCThingResultChecks(LInstruction* lir,
                                            MDefinition* mir) {
  if (lir->numDefs() == 0) {
    return;
  }

  MOZ_ASSERT(lir->numDefs() == 1);
  if (lir->getDef(0)->isBogusTemp()) {
    return;
  }

  Register output = ToRegister(lir->getDef(0));
  emitAssertGCThingResult(output, mir);
}
   8374 
// Debug-only: emits a boxed-Value validity check on the output of |lir| when
// that output lives in general registers (memory outputs are skipped).
void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) {
  if (lir->numDefs() == 0) {
    return;
  }

  MOZ_ASSERT(lir->numDefs() == BOX_PIECES);
  if (!lir->getDef(0)->output()->isGeneralReg()) {
    return;
  }

  ValueOperand output = ToOutValue(lir);

  emitAssertResultV(output, mir);
}
   8389 
// Debug-only: for a WasmAnyRef-typed result with a known destination ref
// type, emits an inline subtype check (branchWasmRefIsSubtype) and hits a
// breakpoint if the value is not actually a subtype. Only runs with full
// debug checks enabled and when the output is in a register.
void CodeGenerator::emitWasmAnyrefResultChecks(LInstruction* lir,
                                               MDefinition* mir) {
  MOZ_ASSERT(mir->type() == MIRType::WasmAnyRef);

  wasm::MaybeRefType destType = mir->wasmRefType();
  if (!destType) {
    return;
  }

  if (!JitOptions.fullDebugChecks) {
    return;
  }

  if (lir->numDefs() == 0) {
    return;
  }

  MOZ_ASSERT(lir->numDefs() == 1);
  if (lir->getDef(0)->isBogusTemp()) {
    return;
  }

  if (lir->getDef(0)->output()->isMemory()) {
    return;
  }
  Register output = ToRegister(lir->getDef(0));

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  regs.take(output);

  // Ask the MacroAssembler which scratch registers this particular subtype
  // check needs; only push/allocate the ones actually required.
  BranchWasmRefIsSubtypeRegisters needs =
      MacroAssembler::regsForBranchWasmRefIsSubtype(destType.value());

  Register temp1;
  Register temp2;
  Register temp3;
  if (needs.needSuperSTV) {
    temp1 = regs.takeAny();
    masm.push(temp1);
  }
  if (needs.needScratch1) {
    temp2 = regs.takeAny();
    masm.push(temp2);
  }
  if (needs.needScratch2) {
    temp3 = regs.takeAny();
    masm.push(temp3);
  }

  if (needs.needSuperSTV) {
    uint32_t typeIndex =
        wasmCodeMeta()->types->indexOf(*destType.value().typeDef());

    // When full debug checks are enabled, we always write the callee instance
    // pointer into its usual slot in the frame in our function prologue, so
    // that we can get it even if the InstanceReg is currently being used for
    // something else.
    masm.loadPtr(
        Address(FramePointer, wasm::FrameWithInstances::calleeInstanceOffset()),
        temp1);
    masm.loadPtr(
        Address(temp1, wasm::Instance::offsetInData(
                           wasmCodeMeta()->offsetOfSuperTypeVector(typeIndex))),
        temp1);
  }

  Label ok;
  masm.branchWasmRefIsSubtype(output, wasm::MaybeRefType(), destType.value(),
                              &ok, /*onSuccess=*/true,
                              /*signalNullChecks=*/false, temp1, temp2, temp3);
  masm.breakpoint();
  masm.bind(&ok);

  // Pop in reverse order of the pushes above.
  if (needs.needScratch2) {
    masm.pop(temp3);
  }
  if (needs.needScratch1) {
    masm.pop(temp2);
  }
  if (needs.needSuperSTV) {
    masm.pop(temp1);
  }

#  ifdef JS_CODEGEN_ARM64
  masm.syncStackPtr();
#  endif
}
   8477 
   8478 void CodeGenerator::emitDebugResultChecks(LInstruction* ins) {
   8479  // In debug builds, check that LIR instructions return valid values.
   8480 
   8481  MDefinition* mir = ins->mirRaw();
   8482  if (!mir) {
   8483    return;
   8484  }
   8485 
   8486  switch (mir->type()) {
   8487    case MIRType::Object:
   8488    case MIRType::String:
   8489    case MIRType::Symbol:
   8490    case MIRType::BigInt:
   8491      emitGCThingResultChecks(ins, mir);
   8492      break;
   8493    case MIRType::Value:
   8494      emitValueResultChecks(ins, mir);
   8495      break;
   8496    case MIRType::WasmAnyRef:
   8497      emitWasmAnyrefResultChecks(ins, mir);
   8498      break;
   8499    default:
   8500      break;
   8501  }
   8502 }
   8503 
void CodeGenerator::emitDebugForceBailing(LInstruction* lir) {
  // Debug-only: when the ionBailAfter testing option is active, force a
  // bailout once a runtime countdown counter reaches zero. A zero counter
  // means the feature is currently inactive.
  if (MOZ_LIKELY(!gen->options.ionBailAfterEnabled())) {
    return;
  }
  // We can only bail at instructions that carry a snapshot to restore from.
  if (!lir->snapshot()) {
    return;
  }
  if (lir->isOsiPoint()) {
    return;
  }

  masm.comment("emitDebugForceBailing");
  const void* bailAfterCounterAddr =
      gen->runtime->addressOfIonBailAfterCounter();

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());

  Label done, notBail;
  // Counter == 0: feature disabled, skip everything.
  masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterCounterAddr),
                Imm32(0), &done);
  {
    Register temp = regs.takeAny();

    // Preserve the scratch register across the counter update. Note it is
    // popped on BOTH paths: before bailing out, and on the fall-through.
    masm.push(temp);
    masm.load32(AbsoluteAddress(bailAfterCounterAddr), temp);
    masm.sub32(Imm32(1), temp);
    masm.store32(temp, AbsoluteAddress(bailAfterCounterAddr));

    // Counter just hit zero: restore the register and bail out.
    masm.branch32(Assembler::NotEqual, temp, Imm32(0), &notBail);
    {
      masm.pop(temp);
      bailout(lir->snapshot());
    }
    masm.bind(&notBail);
    masm.pop(temp);
  }
  masm.bind(&done);
}
   8542 #endif  // DEBUG
   8543 
   8544 bool CodeGenerator::generateBody() {
   8545  JitSpewCont(JitSpew_Codegen, "\n");
   8546  AutoCreatedBy acb(masm, "CodeGenerator::generateBody");
   8547 
   8548  JitSpew(JitSpew_Codegen, "==== BEGIN CodeGenerator::generateBody ====");
   8549  counts_ = maybeCreateScriptCounts();
   8550 
   8551  const bool compilingWasm = gen->compilingWasm();
   8552 
   8553  for (size_t i = 0; i < graph.numBlocks(); i++) {
   8554    current = graph.getBlock(i);
   8555 
   8556    // Don't emit any code for trivial blocks, containing just a goto. Such
   8557    // blocks are created to split critical edges, and if we didn't end up
   8558    // putting any instructions in them, we can skip them.
   8559    if (current->isTrivial()) {
   8560      continue;
   8561    }
   8562 
   8563    if (gen->shouldCancel("Generate Code (block loop)")) {
   8564      return false;
   8565    }
   8566 
   8567    // Skip out of line blocks for now. They will be emitted in
   8568    // generateOutOfLineBlocks.
   8569    if (current->isOutOfLine()) {
   8570      continue;
   8571    }
   8572 
   8573    // Generate a basic block
   8574    if (!generateBlock(current, i, counts_, compilingWasm)) {
   8575      return false;
   8576    }
   8577  }
   8578 
   8579  JitSpew(JitSpew_Codegen, "==== END CodeGenerator::generateBody ====\n");
   8580  return true;
   8581 }
   8582 
bool CodeGenerator::generateBlock(LBlock* current, size_t blockNumber,
                                  IonScriptCounts* counts, bool compilingWasm) {
  // Emit native code for a single LIR block: bind its label, then visit each
  // LIR instruction through the LIROP dispatch switch below.
#ifdef JS_JITSPEW
  const char* filename = nullptr;
  size_t lineNumber = 0;
  JS::LimitedColumnNumberOneOrigin columnNumber;
  if (current->mir()->info().script()) {
    filename = current->mir()->info().script()->filename();
    if (current->mir()->pc()) {
      lineNumber = PCToLineNumber(current->mir()->info().script(),
                                  current->mir()->pc(), &columnNumber);
    }
  }
  JitSpew(JitSpew_Codegen, "--------------------------------");
  JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", blockNumber,
          filename ? filename : "?", lineNumber, columnNumber.oneOriginValue(),
          current->mir()->isLoopHeader() ? " (loop header)" : "");
#endif

  // Align wasm loop headers with nops.
  if (current->mir()->isLoopHeader() && compilingWasm) {
    masm.nopAlign(CodeAlignment);
  }

  masm.bind(current->label());

  // Optional per-block script-counts bookkeeping.
  mozilla::Maybe<ScriptCountBlockState> blockCounts;
  if (counts) {
    blockCounts.emplace(&counts->block(blockNumber), &masm);
    if (!blockCounts->init()) {
      return false;
    }
  }

  for (LInstructionIterator iter = current->begin(); iter != current->end();
       iter++) {
    if (gen->shouldCancel("Generate Code (instruction loop)")) {
      return false;
    }
    // Keep the temporary allocator stocked so visitors can't fail on OOM
    // mid-instruction.
    if (!alloc().ensureBallast()) {
      return false;
    }

    perfSpewer().recordInstruction(masm, *iter);
#ifdef JS_JITSPEW
    JitSpewStart(JitSpew_Codegen, "                                # LIR=%s",
                 iter->opName());
    if (const char* extra = iter->getExtraName()) {
      JitSpewCont(JitSpew_Codegen, ":%s", extra);
    }
    JitSpewFin(JitSpew_Codegen);
#endif

    if (counts) {
      blockCounts->visitInstruction(*iter);
    }

#ifdef CHECK_OSIPOINT_REGISTERS
    if (iter->safepoint() && !compilingWasm) {
      resetOsiPointRegs(iter->safepoint());
    }
#endif

    // Record native-code <-> bytecode mapping (JS compilations only).
    if (!compilingWasm) {
      if (MDefinition* mir = iter->mirRaw()) {
        if (!addNativeToBytecodeEntry(mir->trackedSite())) {
          return false;
        }
      }
    }

    setElement(*iter);  // needed to encode correct snapshot location.

#ifdef DEBUG
    emitDebugForceBailing(*iter);
#endif

    // Dispatch to the per-opcode visit method; LIROP expands one case per
    // LIR opcode.
    switch (iter->op()) {
#ifndef JS_CODEGEN_NONE
#  define LIROP(op)              \
    case LNode::Opcode::op:      \
      visit##op(iter->to##op()); \
      break;
      LIR_OPCODE_LIST(LIROP)
#  undef LIROP
#endif
      case LNode::Opcode::Invalid:
      default:
        MOZ_CRASH("Invalid LIR op");
    }

#ifdef DEBUG
    if (!counts) {
      emitDebugResultChecks(*iter);
    }
#endif
  }

  return !masm.oom();
}
   8682 
bool CodeGenerator::generateOutOfLineBlocks() {
  AutoCreatedBy acb(masm, "CodeGeneratorShared::generateOutOfLineBlocks");

  // Generate out of line basic blocks.
  // If we generate some blocks at the end of the function, we need
  // to adjust the frame depth.
  // NOTE(review): this is gated on branch hinting, which suggests out-of-line
  // blocks only exist when branch hinting is enabled — confirm against the
  // block-marking code.
  if (!gen->branchHintingEnabled()) {
    return true;
  }
  masm.setFramePushed(frameDepth_);

  const bool compilingWasm = gen->compilingWasm();

  for (size_t i = 0; i < graph.numBlocks(); i++) {
    current = graph.getBlock(i);

    if (gen->shouldCancel("Generate Code (block loop)")) {
      return false;
    }

    // Trivial blocks (just a goto) emit no code; see generateBody.
    if (current->isTrivial()) {
      continue;
    }

    // If this block is marked as out of line, we need to generate it now.
    if (!current->isOutOfLine()) {
      continue;
    }

    if (!generateBlock(current, i, counts_, compilingWasm)) {
      return false;
    }
  }

  return !masm.oom();
}
   8719 
   8720 void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) {
   8721  Register objReg = ToRegister(lir->output());
   8722 
   8723  MOZ_ASSERT(!lir->isCall());
   8724  saveLive(lir);
   8725 
   8726  JSObject* templateObject = lir->mir()->templateObject();
   8727 
   8728  if (templateObject) {
   8729    pushArg(ImmGCPtr(templateObject->shape()));
   8730    pushArg(Imm32(lir->mir()->length()));
   8731 
   8732    using Fn = ArrayObject* (*)(JSContext*, uint32_t, Handle<Shape*>);
   8733    callVM<Fn, NewArrayWithShape>(lir);
   8734  } else {
   8735    pushArg(Imm32(GenericObject));
   8736    pushArg(Imm32(lir->mir()->length()));
   8737 
   8738    using Fn = ArrayObject* (*)(JSContext*, uint32_t, NewObjectKind);
   8739    callVM<Fn, NewArrayOperation>(lir);
   8740  }
   8741 
   8742  masm.storeCallPointerResult(objReg);
   8743 
   8744  MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
   8745  restoreLive(lir);
   8746 }
   8747 
   8748 void CodeGenerator::visitAtan2D(LAtan2D* lir) {
   8749  FloatRegister y = ToFloatRegister(lir->y());
   8750  FloatRegister x = ToFloatRegister(lir->x());
   8751 
   8752  using Fn = double (*)(double x, double y);
   8753  masm.setupAlignedABICall();
   8754  masm.passABIArg(y, ABIType::Float64);
   8755  masm.passABIArg(x, ABIType::Float64);
   8756  masm.callWithABI<Fn, ecmaAtan2>(ABIType::Float64);
   8757 
   8758  MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
   8759 }
   8760 
   8761 void CodeGenerator::visitHypot(LHypot* lir) {
   8762  uint32_t numArgs = lir->numArgs();
   8763  masm.setupAlignedABICall();
   8764 
   8765  for (uint32_t i = 0; i < numArgs; ++i) {
   8766    masm.passABIArg(ToFloatRegister(lir->getOperand(i)), ABIType::Float64);
   8767  }
   8768 
   8769  switch (numArgs) {
   8770    case 2: {
   8771      using Fn = double (*)(double x, double y);
   8772      masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
   8773      break;
   8774    }
   8775    case 3: {
   8776      using Fn = double (*)(double x, double y, double z);
   8777      masm.callWithABI<Fn, hypot3>(ABIType::Float64);
   8778      break;
   8779    }
   8780    case 4: {
   8781      using Fn = double (*)(double x, double y, double z, double w);
   8782      masm.callWithABI<Fn, hypot4>(ABIType::Float64);
   8783      break;
   8784    }
   8785    default:
   8786      MOZ_CRASH("Unexpected number of arguments to hypot function.");
   8787  }
   8788  MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
   8789 }
   8790 
   8791 void CodeGenerator::visitNewArray(LNewArray* lir) {
   8792  Register objReg = ToRegister(lir->output());
   8793  Register tempReg = ToRegister(lir->temp0());
   8794  DebugOnly<uint32_t> length = lir->mir()->length();
   8795 
   8796  MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT);
   8797 
   8798  if (lir->mir()->isVMCall()) {
   8799    visitNewArrayCallVM(lir);
   8800    return;
   8801  }
   8802 
   8803  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
   8804    visitNewArrayCallVM(lir);
   8805    masm.jump(ool.rejoin());
   8806  });
   8807  addOutOfLineCode(ool, lir->mir());
   8808  TemplateObject templateObject(lir->mir()->templateObject());
   8809 #ifdef DEBUG
   8810  size_t numInlineElements = gc::GetGCKindSlots(templateObject.getAllocKind()) -
   8811                             ObjectElements::VALUES_PER_HEADER;
   8812  MOZ_ASSERT(length <= numInlineElements,
   8813             "Inline allocation only supports inline elements");
   8814 #endif
   8815  masm.createGCObject(objReg, tempReg, templateObject,
   8816                      lir->mir()->initialHeap(), ool->entry());
   8817 
   8818  masm.bind(ool->rejoin());
   8819 }
   8820 
   8821 void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) {
   8822  Register lengthReg = ToRegister(lir->length());
   8823  Register objReg = ToRegister(lir->output());
   8824  Register tempReg = ToRegister(lir->temp0());
   8825 
   8826  JSObject* templateObject = lir->mir()->templateObject();
   8827  gc::Heap initialHeap = lir->mir()->initialHeap();
   8828 
   8829  using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t length,
   8830                              gc::AllocSite*);
   8831  OutOfLineCode* ool = oolCallVM<Fn, ArrayConstructorOneArg>(
   8832      lir, ArgList(ImmGCPtr(templateObject), lengthReg, ImmPtr(nullptr)),
   8833      StoreRegisterTo(objReg));
   8834 
   8835  bool canInline = true;
   8836  size_t inlineLength = 0;
   8837  if (templateObject->as<ArrayObject>().hasFixedElements()) {
   8838    size_t numSlots =
   8839        gc::GetGCKindSlots(templateObject->asTenured().getAllocKind());
   8840    inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER;
   8841  } else {
   8842    canInline = false;
   8843  }
   8844 
   8845  if (canInline) {
   8846    // Try to do the allocation inline if the template object is big enough
   8847    // for the length in lengthReg. If the length is bigger we could still
   8848    // use the template object and not allocate the elements, but it's more
   8849    // efficient to do a single big allocation than (repeatedly) reallocating
   8850    // the array later on when filling it.
   8851    masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength),
   8852                  ool->entry());
   8853 
   8854    TemplateObject templateObj(templateObject);
   8855    masm.createGCObject(objReg, tempReg, templateObj, initialHeap,
   8856                        ool->entry());
   8857 
   8858    size_t lengthOffset = NativeObject::offsetOfFixedElements() +
   8859                          ObjectElements::offsetOfLength();
   8860    masm.store32(lengthReg, Address(objReg, lengthOffset));
   8861  } else {
   8862    masm.jump(ool->entry());
   8863  }
   8864 
   8865  masm.bind(ool->rejoin());
   8866 }
   8867 
   8868 void CodeGenerator::visitNewIterator(LNewIterator* lir) {
   8869  Register objReg = ToRegister(lir->output());
   8870  Register tempReg = ToRegister(lir->temp0());
   8871 
   8872  OutOfLineCode* ool;
   8873  switch (lir->mir()->type()) {
   8874    case MNewIterator::ArrayIterator: {
   8875      using Fn = ArrayIteratorObject* (*)(JSContext*);
   8876      ool = oolCallVM<Fn, NewArrayIterator>(lir, ArgList(),
   8877                                            StoreRegisterTo(objReg));
   8878      break;
   8879    }
   8880    case MNewIterator::StringIterator: {
   8881      using Fn = StringIteratorObject* (*)(JSContext*);
   8882      ool = oolCallVM<Fn, NewStringIterator>(lir, ArgList(),
   8883                                             StoreRegisterTo(objReg));
   8884      break;
   8885    }
   8886    case MNewIterator::RegExpStringIterator: {
   8887      using Fn = RegExpStringIteratorObject* (*)(JSContext*);
   8888      ool = oolCallVM<Fn, NewRegExpStringIterator>(lir, ArgList(),
   8889                                                   StoreRegisterTo(objReg));
   8890      break;
   8891    }
   8892    default:
   8893      MOZ_CRASH("unexpected iterator type");
   8894  }
   8895 
   8896  TemplateObject templateObject(lir->mir()->templateObject());
   8897  masm.createGCObject(objReg, tempReg, templateObject, gc::Heap::Default,
   8898                      ool->entry());
   8899 
   8900  masm.bind(ool->rejoin());
   8901 }
   8902 
   8903 void CodeGenerator::visitNewTypedArrayInline(LNewTypedArrayInline* lir) {
   8904  Register objReg = ToRegister(lir->output());
   8905  Register tempReg = ToRegister(lir->temp0());
   8906 
   8907  auto* templateObject = lir->mir()->templateObject();
   8908  gc::Heap initialHeap = lir->mir()->initialHeap();
   8909 
   8910  size_t n = templateObject->length();
   8911  MOZ_ASSERT(n <= INT32_MAX,
   8912             "Template objects are only created for int32 lengths");
   8913 
   8914  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t);
   8915  auto* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
   8916      lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
   8917      StoreRegisterTo(objReg));
   8918 
   8919  TemplateObject templateObj(templateObject);
   8920  masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry());
   8921 
   8922  masm.initTypedArraySlotsInline(objReg, tempReg, templateObject);
   8923 
   8924  masm.bind(ool->rejoin());
   8925 }
   8926 
   8927 void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) {
   8928  Register output = ToRegister(lir->output());
   8929  Register temp1Reg = ToRegister(lir->temp0());
   8930  Register temp2Reg = ToRegister(lir->temp1());
   8931  Register lengthReg = ToRegister(lir->temp2());
   8932  Register temp4Reg = ToRegister(lir->temp3());
   8933 
   8934  auto* templateObject = lir->mir()->templateObject();
   8935  gc::Heap initialHeap = lir->mir()->initialHeap();
   8936 
   8937  size_t n = templateObject->length();
   8938  MOZ_ASSERT(n <= INT32_MAX,
   8939             "Template objects are only created for int32 lengths");
   8940 
   8941  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
   8942  OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
   8943      lir, ArgList(ImmGCPtr(templateObject), Imm32(n)),
   8944      StoreRegisterTo(output));
   8945 
   8946  TemplateObject templateObj(templateObject);
   8947  masm.createGCObject(temp4Reg, temp1Reg, templateObj, initialHeap,
   8948                      ool->entry());
   8949 
   8950  masm.move32(Imm32(n), lengthReg);
   8951 
   8952  masm.initTypedArraySlots(temp4Reg, lengthReg, temp1Reg, temp2Reg,
   8953                           ool->entry(), templateObject);
   8954  masm.mov(temp4Reg, output);
   8955 
   8956  masm.bind(ool->rejoin());
   8957 }
   8958 
   8959 void CodeGenerator::visitNewTypedArrayDynamicLength(
   8960    LNewTypedArrayDynamicLength* lir) {
   8961  Register lengthReg = ToRegister(lir->length());
   8962  Register output = ToRegister(lir->output());
   8963  Register temp1Reg = ToRegister(lir->temp0());
   8964  Register temp2Reg = ToRegister(lir->temp1());
   8965  Register temp3Reg = ToRegister(lir->temp2());
   8966 
   8967  JSObject* templateObject = lir->mir()->templateObject();
   8968  gc::Heap initialHeap = lir->mir()->initialHeap();
   8969 
   8970  auto* ttemplate = &templateObject->as<FixedLengthTypedArrayObject>();
   8971 
   8972  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
   8973  OutOfLineCode* ool = oolCallVM<Fn, NewTypedArrayWithTemplateAndLength>(
   8974      lir, ArgList(ImmGCPtr(templateObject), lengthReg),
   8975      StoreRegisterTo(output));
   8976 
   8977  TemplateObject templateObj(templateObject);
   8978  masm.createGCObject(temp3Reg, temp1Reg, templateObj, initialHeap,
   8979                      ool->entry());
   8980 
   8981  masm.initTypedArraySlots(temp3Reg, lengthReg, temp1Reg, temp2Reg,
   8982                           ool->entry(), ttemplate);
   8983  masm.mov(temp3Reg, output);
   8984 
   8985  masm.bind(ool->rejoin());
   8986 }
   8987 
void CodeGenerator::visitNewTypedArrayFromArray(LNewTypedArrayFromArray* lir) {
  // Always a VM call. Arguments are pushed in reverse order of the Fn
  // signature: templateObject (pushed last) is the first argument after cx.
  pushArg(ToRegister(lir->array()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
  callVM<Fn, js::NewTypedArrayWithTemplateAndArray>(lir);
}
   8995 
void CodeGenerator::visitNewTypedArrayFromArrayBuffer(
    LNewTypedArrayFromArrayBuffer* lir) {
  // Always a VM call. Arguments are pushed in reverse order of the Fn
  // signature below (length first, templateObject last).
  pushArg(ToValue(lir->length()));
  pushArg(ToValue(lir->byteOffset()));
  pushArg(ToRegister(lir->arrayBuffer()));
  pushArg(ImmGCPtr(lir->mir()->templateObject()));

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
                                   HandleValue, HandleValue);
  callVM<Fn, js::NewTypedArrayWithTemplateAndBuffer>(lir);
}
   9007 
void CodeGenerator::visitBindFunction(LBindFunction* lir) {
  Register target = ToRegister(lir->target());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // Try to allocate a new BoundFunctionObject we can pass to the VM function.
  // If this fails, we set temp1 to nullptr so we do the allocation in C++.
  TemplateObject templateObject(lir->mir()->templateObject());
  Label allocOk, allocFailed;
  masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
                      &allocFailed);
  masm.jump(&allocOk);

  masm.bind(&allocFailed);
  masm.movePtr(ImmWord(0), temp1);

  masm.bind(&allocOk);

  // Set temp2 to the address of the first argument on the stack.
  // Note that the Value slots used for arguments are currently aligned for a
  // JIT call, even though that's not strictly necessary for calling into C++.
  uint32_t argc = lir->mir()->numStackArgs();
  if (JitStackValueAlignment > 1) {
    argc = AlignBytes(argc, JitStackValueAlignment);
  }
  uint32_t unusedStack = UnusedStackBytesForCall(argc);
  masm.computeEffectiveAddress(Address(masm.getStackPointer(), unusedStack),
                               temp2);

  // Arguments are pushed in reverse order of the Fn signature below: note
  // the Imm32 uses the un-padded argument count, not the aligned |argc|.
  pushArg(temp1);
  pushArg(Imm32(lir->mir()->numStackArgs()));
  pushArg(temp2);
  pushArg(target);

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
                                      uint32_t, Handle<BoundFunctionObject*>);
  callVM<Fn, js::BoundFunctionObject::functionBindImpl>(lir);
}
   9046 
   9047 void CodeGenerator::visitNewBoundFunction(LNewBoundFunction* lir) {
   9048  Register output = ToRegister(lir->output());
   9049  Register temp = ToRegister(lir->temp0());
   9050 
   9051  JSObject* templateObj = lir->mir()->templateObj();
   9052 
   9053  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<BoundFunctionObject*>);
   9054  OutOfLineCode* ool = oolCallVM<Fn, BoundFunctionObject::createWithTemplate>(
   9055      lir, ArgList(ImmGCPtr(templateObj)), StoreRegisterTo(output));
   9056 
   9057  TemplateObject templateObject(templateObj);
   9058  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
   9059                      ool->entry());
   9060 
   9061  masm.bind(ool->rejoin());
   9062 }
   9063 
void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) {
  // VM-call path for LNewObject; also used as visitNewObject's out-of-line
  // fallback, so live registers are saved/restored manually around the call.
  Register objReg = ToRegister(lir->output());

  MOZ_ASSERT(!lir->isCall());
  saveLive(lir);

  JSObject* templateObject = lir->mir()->templateObject();

  // If we're making a new object with a class prototype (that is, an object
  // that derives its class from its prototype instead of being
  // PlainObject::class_'d) from self-hosted code, we need a different init
  // function.
  switch (lir->mir()->mode()) {
    case MNewObject::ObjectLiteral: {
      MOZ_ASSERT(!templateObject);
      // Arguments are pushed in reverse order of the Fn signature.
      pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
      pushArg(ImmGCPtr(lir->mir()->block()->info().script()));

      using Fn = JSObject* (*)(JSContext*, HandleScript, const jsbytecode* pc);
      callVM<Fn, NewObjectOperation>(lir);
      break;
    }
    case MNewObject::ObjectCreate: {
      pushArg(ImmGCPtr(templateObject));

      using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
      callVM<Fn, ObjectCreateWithTemplate>(lir);
      break;
    }
  }

  masm.storeCallPointerResult(objReg);

  // restoreLive must not clobber the register holding the call result.
  MOZ_ASSERT(!lir->safepoint()->liveRegs().has(objReg));
  restoreLive(lir);
}
   9100 
// Returns whether the allocation code for |lir| must pre-fill the object's
// fixed slots with |undefined|. Returns false only when every fixed slot is
// provably written by following StoreFixedSlot instructions before a GC can
// be triggered or the object can escape. Note the side effect: pre-barriers
// are disabled on the stores this function proves redundant.
static bool ShouldInitFixedSlots(MIRGenerator* gen, LNewPlainObject* lir,
                                 const Shape* shape, uint32_t nfixed) {
  // Look for StoreFixedSlot instructions following an object allocation
  // that write to this object before a GC is triggered or this object is
  // passed to a VM call. If all fixed slots will be initialized, the
  // allocation code doesn't need to set the slots to |undefined|.

  if (nfixed == 0) {
    return false;
  }

#ifdef DEBUG
  // The bailAfter testing function can trigger a bailout between allocating the
  // object and initializing the slots.
  if (gen->options.ionBailAfterEnabled()) {
    return true;
  }
#endif

  // Keep track of the fixed slots that are initialized. initializedSlots is
  // a bit mask with a bit for each slot.
  MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS);
  static_assert(NativeObject::MAX_FIXED_SLOTS <= 32,
                "Slot bits must fit in 32 bits");
  uint32_t initializedSlots = 0;
  uint32_t numInitialized = 0;

  MInstruction* allocMir = lir->mir();
  MBasicBlock* block = allocMir->block();

  // Skip the allocation instruction.
  MInstructionIterator iter = block->begin(allocMir);
  MOZ_ASSERT(*iter == allocMir);
  iter++;

  // Handle the leading shape guard, if present. Stores may be expressed
  // against the guard's result rather than the allocation itself, so track
  // the guard as the object to match below.
  for (; iter != block->end(); iter++) {
    if (iter->isConstant()) {
      // This instruction won't trigger a GC or read object slots.
      continue;
    }
    if (iter->isGuardShape()) {
      auto* guard = iter->toGuardShape();
      if (guard->object() != allocMir || guard->shape() != shape) {
        return true;
      }
      allocMir = guard;
      iter++;
    }
    break;
  }

  for (; iter != block->end(); iter++) {
    if (iter->isConstant() || iter->isPostWriteBarrier()) {
      // These instructions won't trigger a GC or read object slots.
      continue;
    }

    if (iter->isStoreFixedSlot()) {
      MStoreFixedSlot* store = iter->toStoreFixedSlot();
      if (store->object() != allocMir) {
        return true;
      }

      // We may not initialize this object slot on allocation, so the
      // pre-barrier could read uninitialized memory. Simply disable
      // the barrier for this store: the object was just initialized
      // so the barrier is not necessary.
      store->setNeedsBarrier(false);

      uint32_t slot = store->slot();
      MOZ_ASSERT(slot < nfixed);
      if ((initializedSlots & (1 << slot)) == 0) {
        numInitialized++;
        initializedSlots |= (1 << slot);

        if (numInitialized == nfixed) {
          // All fixed slots will be initialized.
          MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed);
          return false;
        }
      }
      continue;
    }

    // Unhandled instruction, assume it bails or reads object slots.
    return true;
  }

  // Every block ends in a control instruction, which the loop treats as
  // "unhandled" and returns from; falling out of the loop is impossible.
  MOZ_CRASH("Shouldn't get here");
}
   9192 
   9193 void CodeGenerator::visitNewObject(LNewObject* lir) {
   9194  Register objReg = ToRegister(lir->output());
   9195  Register tempReg = ToRegister(lir->temp0());
   9196 
   9197  if (lir->mir()->isVMCall()) {
   9198    visitNewObjectVMCall(lir);
   9199    return;
   9200  }
   9201 
   9202  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
   9203    visitNewObjectVMCall(lir);
   9204    masm.jump(ool.rejoin());
   9205  });
   9206  addOutOfLineCode(ool, lir->mir());
   9207 
   9208  TemplateObject templateObject(lir->mir()->templateObject());
   9209 
   9210  masm.createGCObject(objReg, tempReg, templateObject,
   9211                      lir->mir()->initialHeap(), ool->entry());
   9212 
   9213  masm.bind(ool->rejoin());
   9214 }
   9215 
void CodeGenerator::visitNewPlainObject(LNewPlainObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register temp0Reg = ToRegister(lir->temp0());
  Register temp1Reg = ToRegister(lir->temp1());
  Register shapeReg = ToRegister(lir->temp2());

  auto* mir = lir->mir();
  const Shape* shape = mir->shape();
  gc::Heap initialHeap = mir->initialHeap();
  gc::AllocKind allocKind = mir->allocKind();

  // VM fallback when inline allocation fails.
  using Fn =
      JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind, gc::Heap);
  OutOfLineCode* ool = oolCallVM<Fn, NewPlainObjectOptimizedFallback>(
      lir,
      ArgList(ImmGCPtr(shape), Imm32(int32_t(allocKind)),
              Imm32(int32_t(initialHeap))),
      StoreRegisterTo(objReg));

  // Skip the |undefined| pre-fill of fixed slots when following stores
  // provably initialize them all (see ShouldInitFixedSlots).
  bool initContents =
      ShouldInitFixedSlots(gen, lir, shape, mir->numFixedSlots());

  masm.movePtr(ImmGCPtr(shape), shapeReg);
  masm.createPlainGCObject(
      objReg, shapeReg, temp0Reg, temp1Reg, mir->numFixedSlots(),
      mir->numDynamicSlots(), allocKind, initialHeap, ool->entry(),
      AllocSiteInput(gc::CatchAllAllocSite::Optimized), initContents);

#ifdef DEBUG
  // ShouldInitFixedSlots expects that the leading GuardShape will never fail,
  // so ensure the newly created object has the correct shape. Should the guard
  // ever fail, we may end up with uninitialized fixed slots, which can confuse
  // the GC.
  Label ok;
  masm.branchTestObjShape(Assembler::Equal, objReg, shape, temp0Reg, objReg,
                          &ok);
  masm.assumeUnreachable("Newly created object has the correct shape");
  masm.bind(&ok);
#endif

  masm.bind(ool->rejoin());
}
   9258 
void CodeGenerator::visitNewArrayObject(LNewArrayObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register temp0Reg = ToRegister(lir->temp0());
  Register shapeReg = ToRegister(lir->temp1());

  auto* mir = lir->mir();
  uint32_t arrayLength = mir->length();

  // Pick an alloc kind large enough for the expected length.
  gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
  MOZ_ASSERT(gc::GetObjectFinalizeKind(&ArrayObject::class_) ==
             gc::FinalizeKind::None);
  MOZ_ASSERT(!IsFinalizedKind(allocKind));

  // Capacity is the slot count minus the elements header.
  uint32_t slotCount = GetGCKindSlots(allocKind);
  MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
  uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;

  const Shape* shape = mir->shape();

  NewObjectKind objectKind =
      mir->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;

  // VM fallback when inline allocation fails.
  using Fn =
      ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, NewObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, NewArrayObjectOptimizedFallback>(
      lir,
      ArgList(Imm32(arrayLength), Imm32(int32_t(allocKind)), Imm32(objectKind)),
      StoreRegisterTo(objReg));

  // NOTE(review): |shape| is a GC thing but is embedded with ImmPtr here,
  // whereas visitNewPlainObject uses ImmGCPtr — confirm the shape is kept
  // alive/traced through another reference.
  masm.movePtr(ImmPtr(shape), shapeReg);
  masm.createArrayWithFixedElements(
      objReg, shapeReg, temp0Reg, InvalidReg, arrayLength, arrayCapacity, 0, 0,
      allocKind, mir->initialHeap(), ool->entry(),
      AllocSiteInput(gc::CatchAllAllocSite::Optimized));
  masm.bind(ool->rejoin());
}
   9295 
   9296 void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) {
   9297  Register objReg = ToRegister(lir->output());
   9298  Register tempReg = ToRegister(lir->temp0());
   9299  const CompileInfo& info = lir->mir()->block()->info();
   9300  gc::Heap heap = lir->mir()->initialHeap();
   9301 
   9302  using Fn = js::NamedLambdaObject* (*)(JSContext*, HandleFunction, gc::Heap);
   9303  OutOfLineCode* ool = oolCallVM<Fn, NamedLambdaObject::createWithoutEnclosing>(
   9304      lir, ArgList(info.funMaybeLazy(), Imm32(uint32_t(heap))),
   9305      StoreRegisterTo(objReg));
   9306 
   9307  TemplateObject templateObject(lir->mir()->templateObj());
   9308 
   9309  masm.createGCObject(objReg, tempReg, templateObject, heap, ool->entry(),
   9310                      /* initContents = */ true,
   9311                      AllocSiteInput(gc::CatchAllAllocSite::Optimized));
   9312 
   9313  masm.bind(ool->rejoin());
   9314 }
   9315 
// Allocate a CallObject inline from its template object, with a VM-call
// fallback when inline allocation fails.
void CodeGenerator::visitNewCallObject(LNewCallObject* lir) {
  Register objReg = ToRegister(lir->output());
  Register tempReg = ToRegister(lir->temp0());

  CallObject* templateObj = lir->mir()->templateObject();
  gc::Heap heap = lir->mir()->initialHeap();

  // TODO: should get a specialized fallback that passes the alloc site.
  using Fn = CallObject* (*)(JSContext*, Handle<SharedShape*>, gc::Heap);
  OutOfLineCode* ool = oolCallVM<Fn, CallObject::createWithShape>(
      lir, ArgList(ImmGCPtr(templateObj->sharedShape()), Imm32(uint32_t(heap))),
      StoreRegisterTo(objReg));

  // Inline call object creation, using the OOL path only for tricky cases.
  TemplateObject templateObject(templateObj);

  masm.createGCObject(objReg, tempReg, templateObject, heap, ool->entry(),
                      /* initContents = */ true,
                      AllocSiteInput(gc::CatchAllAllocSite::Optimized));

  masm.bind(ool->rejoin());
}
   9338 
   9339 void CodeGenerator::visitNewMapObject(LNewMapObject* lir) {
   9340  Register output = ToRegister(lir->output());
   9341  Register temp = ToRegister(lir->temp0());
   9342 
   9343  // Note: pass nullptr for |proto| to use |Map.prototype|.
   9344  using Fn = MapObject* (*)(JSContext*, HandleObject);
   9345  auto* ool = oolCallVM<Fn, MapObject::create>(lir, ArgList(ImmPtr(nullptr)),
   9346                                               StoreRegisterTo(output));
   9347 
   9348  TemplateObject templateObject(lir->mir()->templateObject());
   9349  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
   9350                      ool->entry());
   9351  masm.bind(ool->rejoin());
   9352 }
   9353 
   9354 void CodeGenerator::visitNewSetObject(LNewSetObject* lir) {
   9355  Register output = ToRegister(lir->output());
   9356  Register temp = ToRegister(lir->temp0());
   9357 
   9358  // Note: pass nullptr for |proto| to use |Set.prototype|.
   9359  using Fn = SetObject* (*)(JSContext*, HandleObject);
   9360  auto* ool = oolCallVM<Fn, SetObject::create>(lir, ArgList(ImmPtr(nullptr)),
   9361                                               StoreRegisterTo(output));
   9362 
   9363  TemplateObject templateObject(lir->mir()->templateObject());
   9364  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
   9365                      ool->entry());
   9366  masm.bind(ool->rejoin());
   9367 }
   9368 
// Create a MapObject populated from |iterable|. Tries to allocate the map
// inline; the VM call then fills it (or allocates it too, when the inline
// allocation failed or |iterable| needs iteration).
void CodeGenerator::visitNewMapObjectFromIterable(
    LNewMapObjectFromIterable* lir) {
  ValueOperand iterable = ToValue(lir->iterable());
  Register output = ToRegister(lir->output());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // Allocate a new MapObject. If this fails we pass nullptr for
  // allocatedFromJit.
  Label failedAlloc, vmCall, done;
  TemplateObject templateObject(lir->mir()->templateObject());
  masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
                      &failedAlloc);

  // We're done if |iterable| is null or undefined.
  masm.branchIfNotNullOrUndefined(iterable, &vmCall);
  masm.movePtr(temp1, output);
  masm.jump(&done);

  // Inline allocation failed: signal that to the VM with a null
  // allocatedFromJit argument.
  masm.bind(&failedAlloc);
  masm.movePtr(ImmPtr(nullptr), temp1);

  masm.bind(&vmCall);

  // VM-call arguments are pushed in reverse order. Passing nullptr for
  // |proto| selects |Map.prototype|.
  pushArg(temp1);  // allocatedFromJit
  pushArg(iterable);
  pushArg(ImmPtr(nullptr));  // proto

  using Fn = MapObject* (*)(JSContext*, Handle<JSObject*>, Handle<Value>,
                            Handle<MapObject*>);
  callVM<Fn, MapObject::createFromIterable>(lir);

  masm.bind(&done);
}
   9403 
// Create a SetObject populated from |iterable|. Mirrors
// visitNewMapObjectFromIterable: inline allocation first, VM call to fill.
void CodeGenerator::visitNewSetObjectFromIterable(
    LNewSetObjectFromIterable* lir) {
  ValueOperand iterable = ToValue(lir->iterable());
  Register output = ToRegister(lir->output());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // Allocate a new SetObject. If this fails we pass nullptr for
  // allocatedFromJit.
  Label failedAlloc, vmCall, done;
  TemplateObject templateObject(lir->mir()->templateObject());
  masm.createGCObject(temp1, temp2, templateObject, gc::Heap::Default,
                      &failedAlloc);

  // We're done if |iterable| is null or undefined.
  masm.branchIfNotNullOrUndefined(iterable, &vmCall);
  masm.movePtr(temp1, output);
  masm.jump(&done);

  // Inline allocation failed: signal that to the VM with a null
  // allocatedFromJit argument.
  masm.bind(&failedAlloc);
  masm.movePtr(ImmPtr(nullptr), temp1);

  masm.bind(&vmCall);

  // VM-call arguments are pushed in reverse order. Passing nullptr for
  // |proto| selects |Set.prototype|.
  pushArg(temp1);  // allocatedFromJit
  pushArg(iterable);
  pushArg(ImmPtr(nullptr));  // proto

  using Fn = SetObject* (*)(JSContext*, Handle<JSObject*>, Handle<Value>,
                            Handle<SetObject*>);
  callVM<Fn, SetObject::createFromIterable>(lir);

  masm.bind(&done);
}
   9438 
// Allocate a StringObject wrapping |input| and initialize its primitive-value
// and length slots inline; falls back to a VM call on allocation failure.
void CodeGenerator::visitNewStringObject(LNewStringObject* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  StringObject* templateObj = lir->mir()->templateObj();

  using Fn = JSObject* (*)(JSContext*, HandleString);
  OutOfLineCode* ool = oolCallVM<Fn, NewStringObject>(lir, ArgList(input),
                                                      StoreRegisterTo(output));

  TemplateObject templateObject(templateObj);
  masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
                      ool->entry());

  masm.loadStringLength(input, temp);

  // Fill in the two reserved slots: the wrapped string and its length.
  masm.storeValue(JSVAL_TYPE_STRING, input,
                  Address(output, StringObject::offsetOfPrimitiveValue()));
  masm.storeValue(JSVAL_TYPE_INT32, temp,
                  Address(output, StringObject::offsetOfLength()));

  masm.bind(ool->rejoin());
}
   9463 
   9464 void CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir) {
   9465  Register obj = ToRegister(lir->object());
   9466  Register value = ToRegister(lir->value());
   9467 
   9468  pushArg(value);
   9469  pushArg(ToValue(lir->id()));
   9470  pushArg(obj);
   9471  pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
   9472 
   9473  using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject, HandleValue,
   9474                      HandleObject);
   9475  callVM<Fn, InitElemGetterSetterOperation>(lir);
   9476 }
   9477 
   9478 void CodeGenerator::visitMutateProto(LMutateProto* lir) {
   9479  Register objReg = ToRegister(lir->object());
   9480 
   9481  pushArg(ToValue(lir->value()));
   9482  pushArg(objReg);
   9483 
   9484  using Fn =
   9485      bool (*)(JSContext* cx, Handle<PlainObject*> obj, HandleValue value);
   9486  callVM<Fn, MutatePrototype>(lir);
   9487 }
   9488 
   9489 void CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir) {
   9490  Register obj = ToRegister(lir->object());
   9491  Register value = ToRegister(lir->value());
   9492 
   9493  pushArg(value);
   9494  pushArg(ImmGCPtr(lir->mir()->name()));
   9495  pushArg(obj);
   9496  pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));
   9497 
   9498  using Fn = bool (*)(JSContext*, jsbytecode*, HandleObject,
   9499                      Handle<PropertyName*>, HandleObject);
   9500  callVM<Fn, InitPropGetterSetterOperation>(lir);
   9501 }
   9502 
   9503 void CodeGenerator::visitCreateThis(LCreateThis* lir) {
   9504  const LAllocation* callee = lir->callee();
   9505  const LAllocation* newTarget = lir->newTarget();
   9506 
   9507  if (newTarget->isConstant()) {
   9508    pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
   9509  } else {
   9510    pushArg(ToRegister(newTarget));
   9511  }
   9512 
   9513  if (callee->isConstant()) {
   9514    pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
   9515  } else {
   9516    pushArg(ToRegister(callee));
   9517  }
   9518 
   9519  using Fn = bool (*)(JSContext* cx, HandleObject callee,
   9520                      HandleObject newTarget, MutableHandleValue rval);
   9521  callVM<Fn, jit::CreateThisFromIon>(lir);
   9522 }
   9523 
// Create the function's arguments object. Fast path: allocate from the
// template object and finish initialization with a pure ABI call; slow path:
// full VM call.
void CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir) {
  // This should be getting constructed in the first block only, and not any OSR
  // entry blocks.
  MOZ_ASSERT(lir->mir()->block()->id() == 0);

  Register callObj = ToRegister(lir->callObject());
  Register temp0 = ToRegister(lir->temp0());
  Label done;

  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    Register objTemp = ToRegister(lir->temp1());
    Register cxTemp = ToRegister(lir->temp2());

    // Save callObj across the ABI call; it is reloaded on failure.
    masm.Push(callObj);

    // Try to allocate an arguments object. This will leave the reserved
    // slots uninitialized, so it's important we don't GC until we
    // initialize these slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(objTemp, temp0, templateObject, gc::Heap::Default,
                        &failure,
                        /* initContents = */ false);

    // Compute the address of the frame (stack pointer plus pushed bytes).
    masm.moveStackPtrTo(temp0);
    masm.addPtr(Imm32(masm.framePushed()), temp0);

    using Fn =
        ArgumentsObject* (*)(JSContext * cx, jit::JitFrameLayout * frame,
                             JSObject * scopeChain, ArgumentsObject * obj);
    masm.setupAlignedABICall();
    masm.loadJSContext(cxTemp);
    masm.passABIArg(cxTemp);
    masm.passABIArg(temp0);
    masm.passABIArg(callObj);
    masm.passABIArg(objTemp);

    masm.callWithABI<Fn, ArgumentsObject::finishForIonPure>();
    // A null return means the pure finish failed; fall through to the VM path.
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj on the stack.
    masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.Pop(callObj);
  }

  // Slow path: compute the frame address and call into the VM.
  masm.moveStackPtrTo(temp0);
  masm.addPtr(Imm32(frameSize()), temp0);

  pushArg(callObj);
  pushArg(temp0);

  using Fn = ArgumentsObject* (*)(JSContext*, JitFrameLayout*, HandleObject);
  callVM<Fn, ArgumentsObject::createForIon>(lir);

  masm.bind(&done);
}
   9583 
// Create an arguments object for an inlined call frame. The actuals are first
// materialized as a contiguous Value array on the stack; then either a pure
// fast path (template alloc + finishInlineForIonPure) or a VM call builds the
// object from that array.
void CodeGenerator::visitCreateInlinedArgumentsObject(
    LCreateInlinedArgumentsObject* lir) {
  Register callObj = ToRegister(lir->getCallObject());
  Register callee = ToRegister(lir->getCallee());
  Register argsAddress = ToRegister(lir->temp1());
  Register argsObj = ToRegister(lir->temp2());

  // TODO: Do we have to worry about alignment here?

  // Create a contiguous array of values for ArgumentsObject::create
  // by pushing the arguments onto the stack in reverse order.
  uint32_t argc = lir->mir()->numActuals();
  for (uint32_t i = 0; i < argc; i++) {
    uint32_t argNum = argc - i - 1;
    uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(argNum);
    ConstantOrRegister arg =
        toConstantOrRegister(lir, index, lir->mir()->getArg(argNum)->type());
    masm.Push(arg);
  }
  masm.moveStackPtrTo(argsAddress);

  Label done;
  if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
    // Preserve the inputs the VM fallback still needs.
    LiveRegisterSet liveRegs;
    liveRegs.add(callObj);
    liveRegs.add(callee);

    masm.PushRegsInMask(liveRegs);

    // We are free to clobber all registers, as LCreateInlinedArgumentsObject is
    // a call instruction.
    AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    allRegs.take(callObj);
    allRegs.take(callee);
    allRegs.take(argsObj);
    allRegs.take(argsAddress);

    Register temp3 = allRegs.takeAny();
    Register temp4 = allRegs.takeAny();

    // Try to allocate an arguments object. This will leave the reserved slots
    // uninitialized, so it's important we don't GC until we initialize these
    // slots in ArgumentsObject::finishForIonPure.
    Label failure;
    TemplateObject templateObject(templateObj);
    masm.createGCObject(argsObj, temp3, templateObject, gc::Heap::Default,
                        &failure,
                        /* initContents = */ false);

    Register numActuals = temp3;
    masm.move32(Imm32(argc), numActuals);

    using Fn = ArgumentsObject* (*)(JSContext*, JSObject*, JSFunction*, Value*,
                                    uint32_t, ArgumentsObject*);
    masm.setupAlignedABICall();
    masm.loadJSContext(temp4);
    masm.passABIArg(temp4);
    masm.passABIArg(callObj);
    masm.passABIArg(callee);
    masm.passABIArg(argsAddress);
    masm.passABIArg(numActuals);
    masm.passABIArg(argsObj);

    masm.callWithABI<Fn, ArgumentsObject::finishInlineForIonPure>();
    // A null return means the pure finish failed; fall back to the VM call.
    masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

    // Discard saved callObj, callee, and values array on the stack.
    masm.addToStackPtr(
        Imm32(MacroAssembler::PushRegsInMaskSizeInBytes(liveRegs) +
              argc * sizeof(Value)));
    masm.jump(&done);

    masm.bind(&failure);
    masm.PopRegsInMask(liveRegs);

    // Reload argsAddress because it may have been overridden.
    masm.moveStackPtrTo(argsAddress);
  }

  // VM-call arguments are pushed in reverse order.
  pushArg(Imm32(argc));
  pushArg(callObj);
  pushArg(callee);
  pushArg(argsAddress);

  using Fn = ArgumentsObject* (*)(JSContext*, Value*, HandleFunction,
                                  HandleObject, uint32_t);
  callVM<Fn, ArgumentsObject::createForInlinedIon>(lir);

  // Discard the array of values.
  masm.freeStack(argc * sizeof(Value));

  masm.bind(&done);
}
   9677 
// Shared helper for LGetInlinedArgument/LGetInlinedArgumentHole: select the
// inlined actual at runtime |index| into |output| by emitting a compare chain
// over the statically-known actuals. |index| must already be bounds-checked.
template <class GetInlinedArgument>
void CodeGenerator::emitGetInlinedArgument(GetInlinedArgument* lir,
                                           Register index,
                                           ValueOperand output) {
  uint32_t numActuals = lir->mir()->numActuals();
  MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);

  // The index has already been bounds-checked, so the code we
  // generate here should be unreachable. We can end up in this
  // situation in self-hosted code using GetArgument(), or in a
  // monomorphically inlined function if we've inlined some CacheIR
  // that was created for a different caller.
  if (numActuals == 0) {
    masm.assumeUnreachable("LGetInlinedArgument: invalid index");
    return;
  }

  // Check the first n-1 possible indices.
  Label done;
  for (uint32_t i = 0; i < numActuals - 1; i++) {
    Label skip;
    ConstantOrRegister arg = toConstantOrRegister(
        lir, GetInlinedArgument::ArgIndex(i), lir->mir()->getArg(i)->type());
    masm.branch32(Assembler::NotEqual, index, Imm32(i), &skip);
    masm.moveValue(arg, output);

    masm.jump(&done);
    masm.bind(&skip);
  }

#ifdef DEBUG
  // In debug builds, verify the fall-through case really is the last index.
  Label skip;
  masm.branch32(Assembler::Equal, index, Imm32(numActuals - 1), &skip);
  masm.assumeUnreachable("LGetInlinedArgument: invalid index");
  masm.bind(&skip);
#endif

  // The index has already been bounds-checked, so load the last argument.
  uint32_t lastIdx = numActuals - 1;
  ConstantOrRegister arg =
      toConstantOrRegister(lir, GetInlinedArgument::ArgIndex(lastIdx),
                           lir->mir()->getArg(lastIdx)->type());
  masm.moveValue(arg, output);
  masm.bind(&done);
}
   9723 
   9724 void CodeGenerator::visitGetInlinedArgument(LGetInlinedArgument* lir) {
   9725  Register index = ToRegister(lir->getIndex());
   9726  ValueOperand output = ToOutValue(lir);
   9727 
   9728  emitGetInlinedArgument(lir, index, output);
   9729 }
   9730 
// Like visitGetInlinedArgument, but out-of-bounds (non-negative) indices
// produce |undefined| instead of being unreachable. Negative indices bail out.
void CodeGenerator::visitGetInlinedArgumentHole(LGetInlinedArgumentHole* lir) {
  Register index = ToRegister(lir->getIndex());
  ValueOperand output = ToOutValue(lir);

  uint32_t numActuals = lir->mir()->numActuals();

  if (numActuals == 0) {
    // No actuals at all: any non-negative index yields undefined.
    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
    masm.moveValue(UndefinedValue(), output);
    return;
  }

  // Unsigned compare also sends negative indices to the out-of-bounds path,
  // where they bail out below.
  Label outOfBounds, done;
  masm.branch32(Assembler::AboveOrEqual, index, Imm32(numActuals),
                &outOfBounds);

  emitGetInlinedArgument(lir, index, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), output);

  masm.bind(&done);
}
   9756 
// Load argument |argno| (a compile-time constant) out of an arguments
// object's data.
void CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  ValueOperand out = ToOutValue(lir);

  // Load the ArgumentsData pointer, then index into its args array.
  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  masm.loadValue(argAddr, out);
#ifdef DEBUG
  // A magic value would mean the slot holds a hole sentinel, which callers of
  // this LIR op are not expected to see.
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, out, &success);
  masm.assumeUnreachable(
      "Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
}
   9775 
// Store a value into argument slot |argno| of an arguments object's data,
// with the required pre-barrier on the old value.
void CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir) {
  Register temp = ToRegister(lir->temp0());
  Register argsObj = ToRegister(lir->argsObject());
  ValueOperand value = ToValue(lir->value());

  // Load the ArgumentsData pointer, then index into its args array.
  masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()),
                   temp);
  Address argAddr(temp, ArgumentsData::offsetOfArgs() +
                            lir->mir()->argno() * sizeof(Value));
  // Pre-barrier on the value being overwritten.
  emitPreBarrier(argAddr);
#ifdef DEBUG
  // A magic value would mean the slot holds a hole sentinel, which this LIR
  // op is not expected to overwrite.
  Label success;
  masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
  masm.assumeUnreachable(
      "Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
  masm.bind(&success);
#endif
  masm.storeValue(value, argAddr);
}
   9795 
   9796 void CodeGenerator::visitLoadArgumentsObjectArg(LLoadArgumentsObjectArg* lir) {
   9797  Register temp = ToRegister(lir->temp0());
   9798  Register argsObj = ToRegister(lir->argsObject());
   9799  Register index = ToRegister(lir->index());
   9800  ValueOperand out = ToOutValue(lir);
   9801 
   9802  Label bail;
   9803  masm.loadArgumentsObjectElement(argsObj, index, out, temp, &bail);
   9804  bailoutFrom(&bail, lir->snapshot());
   9805 }
   9806 
   9807 void CodeGenerator::visitLoadArgumentsObjectArgHole(
   9808    LLoadArgumentsObjectArgHole* lir) {
   9809  Register temp = ToRegister(lir->temp0());
   9810  Register argsObj = ToRegister(lir->argsObject());
   9811  Register index = ToRegister(lir->index());
   9812  ValueOperand out = ToOutValue(lir);
   9813 
   9814  Label bail;
   9815  masm.loadArgumentsObjectElementHole(argsObj, index, out, temp, &bail);
   9816  bailoutFrom(&bail, lir->snapshot());
   9817 }
   9818 
   9819 void CodeGenerator::visitInArgumentsObjectArg(LInArgumentsObjectArg* lir) {
   9820  Register temp = ToRegister(lir->temp0());
   9821  Register argsObj = ToRegister(lir->argsObject());
   9822  Register index = ToRegister(lir->index());
   9823  Register out = ToRegister(lir->output());
   9824 
   9825  Label bail;
   9826  masm.loadArgumentsObjectElementExists(argsObj, index, out, temp, &bail);
   9827  bailoutFrom(&bail, lir->snapshot());
   9828 }
   9829 
   9830 void CodeGenerator::visitArgumentsObjectLength(LArgumentsObjectLength* lir) {
   9831  Register argsObj = ToRegister(lir->argsObject());
   9832  Register out = ToRegister(lir->output());
   9833 
   9834  Label bail;
   9835  masm.loadArgumentsObjectLength(argsObj, out, &bail);
   9836  bailoutFrom(&bail, lir->snapshot());
   9837 }
   9838 
   9839 void CodeGenerator::visitArrayFromArgumentsObject(
   9840    LArrayFromArgumentsObject* lir) {
   9841  pushArg(ToRegister(lir->argsObject()));
   9842 
   9843  using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
   9844  callVM<Fn, js::ArrayFromArgumentsObject>(lir);
   9845 }
   9846 
   9847 void CodeGenerator::visitGuardArgumentsObjectFlags(
   9848    LGuardArgumentsObjectFlags* lir) {
   9849  Register argsObj = ToRegister(lir->argsObject());
   9850  Register temp = ToRegister(lir->temp0());
   9851 
   9852  Label bail;
   9853  masm.branchTestArgumentsObjectFlags(argsObj, temp, lir->mir()->flags(),
   9854                                      Assembler::NonZero, &bail);
   9855  bailoutFrom(&bail, lir->snapshot());
   9856 }
   9857 
   9858 void CodeGenerator::visitGuardObjectHasSameRealm(
   9859    LGuardObjectHasSameRealm* lir) {
   9860  Register obj = ToRegister(lir->object());
   9861  Register temp = ToRegister(lir->temp0());
   9862 
   9863  Label bail;
   9864  masm.guardObjectHasSameRealm(obj, temp, &bail);
   9865  bailoutFrom(&bail, lir->snapshot());
   9866 }
   9867 
   9868 void CodeGenerator::visitBoundFunctionNumArgs(LBoundFunctionNumArgs* lir) {
   9869  Register obj = ToRegister(lir->object());
   9870  Register output = ToRegister(lir->output());
   9871 
   9872  masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
   9873                  output);
   9874  masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
   9875 }
   9876 
   9877 void CodeGenerator::visitGuardBoundFunctionIsConstructor(
   9878    LGuardBoundFunctionIsConstructor* lir) {
   9879  Register obj = ToRegister(lir->object());
   9880 
   9881  Label bail;
   9882  Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
   9883  masm.branchTest32(Assembler::Zero, flagsSlot,
   9884                    Imm32(BoundFunctionObject::IsConstructorFlag), &bail);
   9885  bailoutFrom(&bail, lir->snapshot());
   9886 }
   9887 
// Implement the constructor-return rule: if the returned |value| is an
// object, return it; otherwise return the |this| object in |obj|.
void CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir) {
  ValueOperand value = ToValue(lir->value());
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  Label valueIsObject, end;

  masm.branchTestObject(Assembler::Equal, value, &valueIsObject);

  // Value is not an object. Return the |this| object instead.
  masm.movePtr(obj, output);
  masm.jump(&end);

  // Value is an object. Return unbox(Value).
  masm.bind(&valueIsObject);
  Register payload = masm.extractObject(value, output);
  // extractObject may hand back a different register on some platforms.
  if (payload != output) {
    masm.movePtr(payload, output);
  }

  masm.bind(&end);
}
   9910 
// Box the |this| value for a non-strict function: objects pass through
// unchanged (fast path); null/undefined become the global this; everything
// else goes through the BoxNonStrictThis VM call.
void CodeGenerator::visitBoxNonStrictThis(LBoxNonStrictThis* lir) {
  ValueOperand value = ToValue(lir->value());
  Register output = ToRegister(lir->output());

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    Label notNullOrUndefined;
    {
      // Null and undefined map directly to the global |this|.
      Label isNullOrUndefined;
      ScratchTagScope tag(masm, value);
      masm.splitTagForTest(value, tag);
      masm.branchTestUndefined(Assembler::Equal, tag, &isNullOrUndefined);
      masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
      masm.bind(&isNullOrUndefined);
      masm.movePtr(ImmGCPtr(lir->mir()->globalThis()), output);
      masm.jump(ool.rejoin());
    }

    // Other primitives: call into the VM. Live registers are saved manually
    // because this OOL path performs its own VM call.
    masm.bind(&notNullOrUndefined);

    saveLive(lir);

    pushArg(value);
    using Fn = JSObject* (*)(JSContext*, HandleValue);
    callVM<Fn, BoxNonStrictThis>(lir);

    StoreRegisterTo(output).generate(this);
    restoreLiveIgnore(lir, StoreRegisterTo(output).clobbered());

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  // Fast path: |value| is already an object.
  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}
   9946 
   9947 void CodeGenerator::visitImplicitThis(LImplicitThis* lir) {
   9948  Register env = ToRegister(lir->env());
   9949  ValueOperand output = ToOutValue(lir);
   9950 
   9951  using Fn = void (*)(JSContext*, HandleObject, MutableHandleValue);
   9952  auto* ool = oolCallVM<Fn, ImplicitThisOperation>(lir, ArgList(env),
   9953                                                   StoreValueTo(output));
   9954 
   9955  masm.computeImplicitThis(env, output, ool->entry());
   9956  masm.bind(ool->rejoin());
   9957 }
   9958 
// Load an array's length as an int32. When the "array exceeds int32 length"
// fuse is intact, lengths above INT32_MAX cannot occur (debug-asserted);
// otherwise we must bail out on such lengths.
void CodeGenerator::visitArrayLength(LArrayLength* lir) {
  Register elements = ToRegister(lir->elements());
  Register output = ToRegister(lir->output());

  Address length(elements, ObjectElements::offsetOfLength());
  masm.load32(length, output);

  bool intact = hasSeenArrayExceedsInt32LengthFuseIntactAndDependencyNoted();

  if (intact) {
#ifdef DEBUG
    // The sign bit being set would mean the length exceeded INT32_MAX.
    Label done;
    masm.branchTest32(Assembler::NotSigned, output, output, &done);
    masm.assumeUnreachable("Unexpected array with length > INT32_MAX");
    masm.bind(&done);
#endif
  } else {
    // Bail out if the length doesn't fit in int32.
    bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
  }
}
   9980 
// Store |index + 1| into |length|. In the register case the index register is
// temporarily incremented for the store and then decremented again, so its
// observable value is unchanged.
static void SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index,
                               const Address& length) {
  if (index->isConstant()) {
    masm.store32(Imm32(ToInt32(index) + 1), length);
  } else {
    Register newLength = ToRegister(index);
    masm.add32(Imm32(1), newLength);
    masm.store32(newLength, length);
    // Restore the original index value.
    masm.sub32(Imm32(1), newLength);
  }
}
   9992 
   9993 void CodeGenerator::visitSetArrayLength(LSetArrayLength* lir) {
   9994  Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
   9995  SetLengthFromIndex(masm, lir->index(), length);
   9996 }
   9997 
// Load a function's |length| as an int32, bailing out when it cannot be
// determined without side effects.
void CodeGenerator::visitFunctionLength(LFunctionLength* lir) {
  Register function = ToRegister(lir->function());
  Register output = ToRegister(lir->output());

  Label bail;

  // Get the JSFunction flags.
  masm.load32(Address(function, JSFunction::offsetOfFlagsAndArgCount()),
              output);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length was previously resolved,
  // the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, output,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      &bail);

  // Note: reuses |output| as both the flags input and the result register.
  masm.loadFunctionLength(function, output, output, &bail);

  bailoutFrom(&bail, lir->snapshot());
}
  10020 
  10021 void CodeGenerator::visitFunctionName(LFunctionName* lir) {
  10022  Register function = ToRegister(lir->function());
  10023  Register output = ToRegister(lir->output());
  10024 
  10025  Label bail;
  10026 
  10027  const JSAtomState& names = gen->runtime->names();
  10028  masm.loadFunctionName(function, output, ImmGCPtr(names.empty_), &bail);
  10029 
  10030  bailoutFrom(&bail, lir->snapshot());
  10031 }
  10032 
// Compute the address of the table entry at index |i| for a Map/Set iterator;
// specialized below for MapObject and SetObject.
template <class TableObject>
static void TableIteratorLoadEntry(MacroAssembler&, Register, Register,
                                   Register);
  10036 
// MapObject specialization: |front| receives the address of entry |i| in the
// map's data array. Clobbers |i|.
template <>
void TableIteratorLoadEntry<MapObject>(MacroAssembler& masm, Register iter,
                                       Register i, Register front) {
  masm.unboxObject(Address(iter, MapIteratorObject::offsetOfTarget()), front);
  masm.loadPrivate(Address(front, MapObject::offsetOfData()), front);

  static_assert(MapObject::Table::offsetOfImplDataElement() == 0,
                "offsetof(Data, element) is 0");
  static_assert(MapObject::Table::sizeofImplData() == 24, "sizeof(Data) is 24");
  // Scale the index by the 24-byte entry size: i * 24 == (i * 3) << 3.
  masm.mulBy3(i, i);
  masm.lshiftPtr(Imm32(3), i);
  masm.addPtr(i, front);
}
  10050 
template <>
void TableIteratorLoadEntry<SetObject>(MacroAssembler& masm, Register iter,
                                      Register i, Register front) {
  // front = iter->target->data (the table's entry array).
  masm.unboxObject(Address(iter, SetIteratorObject::offsetOfTarget()), front);
  masm.loadPrivate(Address(front, SetObject::offsetOfData()), front);

  static_assert(SetObject::Table::offsetOfImplDataElement() == 0,
                "offsetof(Data, element) is 0");
  static_assert(SetObject::Table::sizeofImplData() == 16, "sizeof(Data) is 16");
  // front += i * 16, computed as i << 4.
  masm.lshiftPtr(Imm32(4), i);
  masm.addPtr(i, front);
}
  10063 
// Advances |iter| past the entry at |front|: bumps the iterator's |count|,
// then scans forward from the current |index| to the next non-empty entry
// (or to |dataLength| if none remain) and stores the new index back.
// Clobbers |temp| and |front|.
template <class TableObject>
static void TableIteratorAdvance(MacroAssembler& masm, Register iter,
                                Register front, Register dataLength,
                                Register temp) {
  Register i = temp;

  // Note: |count| and |index| are stored as PrivateUint32Value. We use add32
  // and store32 to change the payload.
  masm.add32(Imm32(1), Address(iter, TableIteratorObject::offsetOfCount()));

  masm.unboxInt32(Address(iter, TableIteratorObject::offsetOfIndex()), i);

  Label done, seek;
  masm.bind(&seek);
  masm.add32(Imm32(1), i);
  masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);

  // We can add sizeof(Data) to |front| to select the next element, because
  // |front| and |mapOrSetObject.data[i]| point to the same location.
  static_assert(TableObject::Table::offsetOfImplDataElement() == 0,
                "offsetof(Data, element) is 0");
  masm.addPtr(Imm32(TableObject::Table::sizeofImplData()), front);

  // Keep seeking while the entry's key is the JS_HASH_KEY_EMPTY magic value,
  // i.e. while the slot holds no live entry.
  masm.branchTestMagic(Assembler::Equal,
                       Address(front, TableObject::Table::offsetOfEntryKey()),
                       JS_HASH_KEY_EMPTY, &seek);

  masm.bind(&done);
  masm.store32(i, Address(iter, TableIteratorObject::offsetOfIndex()));
}
  10094 
// Corresponds to TableIteratorObject::finish.
// Unlinks |iter| from its neighbors (*prevp = next; next->prevp = prevp when
// |next| is non-null) and clears the target slot to mark the iterator
// inactive. Clobbers |temp0| and |temp1|.
static void TableIteratorFinish(MacroAssembler& masm, Register iter,
                               Register temp0, Register temp1) {
  Register next = temp0;
  Register prevp = temp1;
  masm.loadPrivate(Address(iter, TableIteratorObject::offsetOfNext()), next);
  masm.loadPrivate(Address(iter, TableIteratorObject::offsetOfPrevPtr()),
                   prevp);
  masm.storePtr(next, Address(prevp, 0));

  // Only fix up next->prevp if there is a next iterator.
  Label hasNoNext;
  masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);
  masm.storePrivateValue(prevp,
                         Address(next, TableIteratorObject::offsetOfPrevPtr()));
  masm.bind(&hasNoNext);

  // Mark iterator inactive.
  Address targetAddr(iter, TableIteratorObject::offsetOfTarget());
  masm.guardedCallPreBarrier(targetAddr, MIRType::Value);
  masm.storeValue(UndefinedValue(), targetAddr);
}
  10116 
template <>
void CodeGenerator::emitLoadIteratorValues<MapObject>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  // Copy the entry's key and value into |result|'s first two fixed elements,
  // with pre-barriers on the overwritten slots.
  Address keyAddress(front, MapObject::Table::Entry::offsetOfKey());
  Address valueAddress(front, MapObject::Table::Entry::offsetOfValue());
  Address keyElemAddress(result, elementsOffset);
  Address valueElemAddress(result, elementsOffset + sizeof(Value));
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);
  masm.storeValue(valueAddress, valueElemAddress, temp);

  // Emit a post-write barrier on |result| if either the key or the value is
  // a nursery cell.
  Label emitBarrier, skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp,
                                &emitBarrier);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp,
                                &skipBarrier);
  {
    masm.bind(&emitBarrier);
    saveVolatile(temp);
    emitPostWriteBarrier(result);
    restoreVolatile(temp);
  }
  masm.bind(&skipBarrier);
}
  10145 
template <>
void CodeGenerator::emitLoadIteratorValues<SetObject>(Register result,
                                                     Register temp,
                                                     Register front) {
  size_t elementsOffset = NativeObject::offsetOfFixedElements();

  // Copy the entry's key into |result|'s first fixed element, with a
  // pre-barrier on the overwritten slot.
  Address keyAddress(front, SetObject::Table::offsetOfEntryKey());
  Address keyElemAddress(result, elementsOffset);
  masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
  masm.storeValue(keyAddress, keyElemAddress, temp);

  // Emit a post-write barrier on |result| if the key is a nursery cell.
  Label skipBarrier;
  masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp,
                                &skipBarrier);
  {
    saveVolatile(temp);
    emitPostWriteBarrier(result);
    restoreVolatile(temp);
  }
  masm.bind(&skipBarrier);
}
  10167 
// Loads the next entry from the iterator's target table into |result|'s fixed
// elements and advances the iterator. Sets |output| to 0 if an entry was
// loaded, or to 1 if the iterator is done (or was already done).
template <class IteratorObject, class TableObject>
void CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir) {
  Register iter = ToRegister(lir->iter());
  Register result = ToRegister(lir->result());
  Register temp = ToRegister(lir->temp0());
  Register dataLength = ToRegister(lir->temp1());
  Register front = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
  // only called with the correct iterator class. Assert here all self-
  // hosted callers of GetNextEntryForIterator perform this class check.
  // No Spectre mitigations are needed because this is DEBUG-only code.
  Label success;
  masm.branchTestObjClassNoSpectreMitigations(
      Assembler::Equal, iter, &IteratorObject::class_, temp, &success);
  masm.assumeUnreachable("Iterator object should have the correct class.");
  masm.bind(&success);
#endif

  // If the iterator has no target, it's already done.
  // See TableIteratorObject::isActive.
  Label iterAlreadyDone, iterDone, done;
  masm.branchTestUndefined(Assembler::Equal,
                           Address(iter, IteratorObject::offsetOfTarget()),
                           &iterAlreadyDone);

  // Load |iter->index| in |temp| and |iter->target->dataLength| in
  // |dataLength|. Both values are stored as PrivateUint32Value.
  masm.unboxInt32(Address(iter, IteratorObject::offsetOfIndex()), temp);
  masm.unboxObject(Address(iter, IteratorObject::offsetOfTarget()), dataLength);
  masm.unboxInt32(Address(dataLength, TableObject::offsetOfDataLength()),
                  dataLength);
  masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
  {
    // There is an entry at the current index: copy it into |result| and
    // advance the iterator.
    TableIteratorLoadEntry<TableObject>(masm, iter, temp, front);

    emitLoadIteratorValues<TableObject>(result, temp, front);

    TableIteratorAdvance<TableObject>(masm, iter, front, dataLength, temp);

    masm.move32(Imm32(0), output);
    masm.jump(&done);
  }
  {
    // Index reached dataLength: deactivate the iterator, then report done.
    masm.bind(&iterDone);
    TableIteratorFinish(masm, iter, temp, dataLength);

    masm.bind(&iterAlreadyDone);
    masm.move32(Imm32(1), output);
  }
  masm.bind(&done);
}
  10222 
  10223 void CodeGenerator::visitGetNextEntryForIterator(
  10224    LGetNextEntryForIterator* lir) {
  10225  if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
  10226    emitGetNextEntryForIterator<MapIteratorObject, MapObject>(lir);
  10227  } else {
  10228    MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
  10229    emitGetNextEntryForIterator<SetIteratorObject, SetObject>(lir);
  10230  }
  10231 }
  10232 
  10233 // The point of these is to inform Ion of where these values already are; they
  10234 // don't normally generate (much) code.
// Intentionally empty: nothing to emit for these result pseudo-instructions.
void CodeGenerator::visitWasmRegisterPairResult(LWasmRegisterPairResult* lir) {}
void CodeGenerator::visitWasmStackResult(LWasmStackResult* lir) {}
void CodeGenerator::visitWasmStackResult64(LWasmStackResult64* lir) {}
  10238 
  10239 void CodeGenerator::visitWasmStackResultArea(LWasmStackResultArea* lir) {
  10240  LAllocation* output = lir->getDef(0)->output();
  10241  MOZ_ASSERT(output->isStackArea());
  10242  bool tempInit = false;
  10243  for (auto iter = output->toStackArea()->results(); iter; iter.next()) {
  10244    // Zero out ref stack results.
  10245    if (iter.isWasmAnyRef()) {
  10246      Register temp = ToRegister(lir->temp0());
  10247      if (!tempInit) {
  10248        masm.xorPtr(temp, temp);
  10249        tempInit = true;
  10250      }
  10251      masm.storePtr(temp, ToAddress(iter.alloc()));
  10252    }
  10253  }
  10254 }
  10255 
  10256 void CodeGenerator::visitWasmRegisterResult(LWasmRegisterResult* lir) {
  10257 #ifdef JS_64BIT
  10258  if (MWasmRegisterResult* mir = lir->mir()) {
  10259    if (mir->type() == MIRType::Int32) {
  10260      masm.widenInt32(ToRegister(lir->output()));
  10261    }
  10262  }
  10263 #endif
  10264 }
  10265 
  10266 void CodeGenerator::visitWasmSystemFloatRegisterResult(
  10267    LWasmSystemFloatRegisterResult* lir) {
  10268  MOZ_ASSERT(lir->mir()->type() == MIRType::Float32 ||
  10269             lir->mir()->type() == MIRType::Double);
  10270  MOZ_ASSERT_IF(lir->mir()->type() == MIRType::Float32,
  10271                ToFloatRegister(lir->output()) == ReturnFloat32Reg);
  10272  MOZ_ASSERT_IF(lir->mir()->type() == MIRType::Double,
  10273                ToFloatRegister(lir->output()) == ReturnDoubleReg);
  10274 
  10275 #ifdef JS_CODEGEN_ARM
  10276  MWasmSystemFloatRegisterResult* mir = lir->mir();
  10277  if (!mir->hardFP()) {
  10278    if (mir->type() == MIRType::Float32) {
  10279      // Move float32 from r0 to ReturnFloatReg.
  10280      masm.ma_vxfer(r0, ReturnFloat32Reg);
  10281    } else if (mir->type() == MIRType::Double) {
  10282      // Move double from r0/r1 to ReturnDoubleReg.
  10283      masm.ma_vxfer(r0, r1, ReturnDoubleReg);
  10284    } else {
  10285      MOZ_CRASH("SIMD type not supported");
  10286    }
  10287  }
  10288 #elif JS_CODEGEN_X86
  10289  MWasmSystemFloatRegisterResult* mir = lir->mir();
  10290  if (mir->type() == MIRType::Double) {
  10291    masm.reserveStack(sizeof(double));
  10292    masm.fstp(Operand(esp, 0));
  10293    masm.loadDouble(Operand(esp, 0), ReturnDoubleReg);
  10294    masm.freeStack(sizeof(double));
  10295  } else if (mir->type() == MIRType::Float32) {
  10296    masm.reserveStack(sizeof(float));
  10297    masm.fstp32(Operand(esp, 0));
  10298    masm.loadFloat32(Operand(esp, 0), ReturnFloat32Reg);
  10299    masm.freeStack(sizeof(float));
  10300  }
  10301 #endif
  10302 }
  10303 
// Emits a wasm call for every callee kind (direct func, import, asm.js/wasm
// table, funcref, builtin), including return calls and calls inside wasm try
// blocks.
void CodeGenerator::visitWasmCall(LWasmCall* lir) {
  const MWasmCallBase* callBase = lir->callBase();
  bool isReturnCall = lir->isReturnCall();

  // If this call is in Wasm try code block, initialise a wasm::TryNote for this
  // call.
  bool inTry = callBase->inTry();
  if (inTry) {
    size_t tryNoteIndex = callBase->tryNoteIndex();
    wasm::TryNoteVector& tryNotes = masm.tryNotes();
    wasm::TryNote& tryNote = tryNotes[tryNoteIndex];
    tryNote.setTryBodyBegin(masm.currentOffset());
  }

  MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment ==
             0);
  static_assert(
      WasmStackAlignment >= ABIStackAlignment &&
          WasmStackAlignment % ABIStackAlignment == 0,
      "The wasm stack alignment should subsume the ABI-required alignment");

#ifdef DEBUG
  // Breakpoint in debug builds if the stack pointer is misaligned here.
  Label ok;
  masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif

  // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
  // instance and pinned regs. The only case where we don't have to
  // reload the instance and pinned regs is when the callee preserves them.
  bool reloadInstance = true;
  bool reloadPinnedRegs = true;
  bool switchRealm = true;

  const wasm::CallSiteDesc& desc = callBase->desc();
  const wasm::CalleeDesc& callee = callBase->callee();
  CodeOffset retOffset;
  CodeOffset secondRetOffset;
  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCall(desc, callee.funcIndex(), retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
      MOZ_ASSERT(!isReturnCall);
      retOffset = masm.call(desc, callee.funcIndex());
      // Direct calls within the module preserve instance, pinned regs and
      // realm.
      reloadInstance = false;
      reloadPinnedRegs = false;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::Import:
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallImport(desc, callee, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
      MOZ_ASSERT(!isReturnCall);
      retOffset = masm.wasmCallImport(desc, callee);
      break;
    case wasm::CalleeDesc::AsmJSTable:
      retOffset = masm.asmCallIndirect(desc, callee);
      break;
    case wasm::CalleeDesc::WasmTable: {
      Label* nullCheckFailed = nullptr;
#ifndef WASM_HAS_HEAPREG
      // Emit an out-of-line trap for a null indirect-call target.
      {
        auto* ool = new (
            alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
          masm.wasmTrap(wasm::Trap::IndirectCallToNull, desc.toTrapSiteDesc());
        });
        if (lir->isCatchable()) {
          addOutOfLineCode(ool, lir->mirCatchable());
        } else if (isReturnCall) {
          addOutOfLineCode(ool, lir->mirReturnCall());
        } else {
          addOutOfLineCode(ool, lir->mirUncatchable());
        }
        nullCheckFailed = ool->entry();
      }
#endif
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallIndirect(desc, callee, nullCheckFailed, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
      MOZ_ASSERT(!isReturnCall);
      masm.wasmCallIndirect(desc, callee, nullCheckFailed, &retOffset,
                            &secondRetOffset);
      // Register reloading and realm switching are handled dynamically inside
      // wasmCallIndirect.  There are two return offsets, one for each call
      // instruction (fast path and slow path).
      reloadInstance = false;
      reloadPinnedRegs = false;
      switchRealm = false;
      break;
    }
    case wasm::CalleeDesc::Builtin:
      retOffset = masm.call(desc, callee.builtin());
      // The builtin ABI preserves the instance and pinned registers. However,
      // builtins may grow the memory which requires us to reload the pinned
      // registers.
      reloadInstance = false;
      reloadPinnedRegs = true;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::BuiltinInstanceMethod:
      retOffset = masm.wasmCallBuiltinInstanceMethod(
          desc, callBase->instanceArg(), callee.builtin(),
          callBase->builtinMethodFailureMode(),
          callBase->builtinMethodFailureTrap());
      // The builtin ABI preserves the instance and pinned registers. However,
      // builtins may grow the memory which requires us to reload the pinned
      // registers.
      reloadInstance = false;
      reloadPinnedRegs = true;
      switchRealm = false;
      break;
    case wasm::CalleeDesc::FuncRef:
      if (isReturnCall) {
        ReturnCallAdjustmentInfo retCallInfo(
            callBase->stackArgAreaSizeUnaligned(), inboundStackArgBytes_);
        masm.wasmReturnCallRef(desc, callee, retCallInfo);
        // The rest of the method is unnecessary for a return call.
        return;
      }
      MOZ_ASSERT(!isReturnCall);
      // Register reloading and realm switching are handled dynamically inside
      // wasmCallRef.  There are two return offsets, one for each call
      // instruction (fast path and slow path).
      masm.wasmCallRef(desc, callee, &retOffset, &secondRetOffset);
      reloadInstance = false;
      reloadPinnedRegs = false;
      switchRealm = false;
      break;
  }

  // Note the assembler offset for the associated LSafePoint.
  MOZ_ASSERT(!isReturnCall);
  markSafepointAt(retOffset.offset(), lir);

  // Now that all the outbound in-memory args are on the stack, note the
  // required lower boundary point of the associated StackMap.
  uint32_t framePushedAtStackMapBase =
      masm.framePushed() -
      wasm::AlignStackArgAreaSize(callBase->stackArgAreaSizeUnaligned());
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAtStackMapBase);
  MOZ_ASSERT(lir->safepoint()->wasmSafepointKind() ==
             WasmSafepointKind::LirCall);

  // Note the assembler offset and framePushed for use by the adjunct
  // LSafePoint, see visitor for LWasmCallIndirectAdjunctSafepoint below.
  if (callee.which() == wasm::CalleeDesc::WasmTable ||
      callee.which() == wasm::CalleeDesc::FuncRef) {
    lir->adjunctSafepoint()->recordSafepointInfo(secondRetOffset,
                                                 framePushedAtStackMapBase);
  }

  if (reloadInstance) {
    masm.loadPtr(
        Address(masm.getStackPointer(), WasmCallerInstanceOffsetBeforeCall),
        InstanceReg);
    if (switchRealm) {
      masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
    }
  } else {
    MOZ_ASSERT(!switchRealm);
  }
  if (reloadPinnedRegs) {
    masm.loadWasmPinnedRegsFromInstance(mozilla::Nothing());
  }

  switch (callee.which()) {
    case wasm::CalleeDesc::Func:
    case wasm::CalleeDesc::Import:
    case wasm::CalleeDesc::WasmTable:
    case wasm::CalleeDesc::FuncRef:
      // Stack allocation could change during Wasm (return) calls,
      // recover pre-call state.
      masm.freeStackTo(masm.framePushed());
      break;
    default:
      break;
  }

  if (inTry) {
    // Set the end of the try note range
    size_t tryNoteIndex = callBase->tryNoteIndex();
    wasm::TryNoteVector& tryNotes = masm.tryNotes();
    wasm::TryNote& tryNote = tryNotes[tryNoteIndex];

    // Don't set the end of the try note if we've OOM'ed, as the above
    // instructions may not have been emitted, which will trigger an assert
    // about zero-length try-notes. This is okay as this compilation will be
    // thrown away.
    if (!masm.oom()) {
      tryNote.setTryBodyEnd(masm.currentOffset());
    }

    // This instruction or the adjunct safepoint must be the last instruction
    // in the block. No other instructions may be inserted.
    LBlock* block = lir->block();
    MOZ_RELEASE_ASSERT(*block->rbegin() == lir ||
                       (block->rbegin()->isWasmCallIndirectAdjunctSafepoint() &&
                        *(++block->rbegin()) == lir));

    // Jump to the fallthrough block
    jumpToBlock(lir->mirCatchable()->getSuccessor(
        MWasmCallCatchable::FallthroughBranchIndex));
  }
}
  10522 
  10523 #ifdef ENABLE_WASM_JSPI
// Calls the UpdateSuspenderState builtin with the given action for
// |suspender|. Clobbers |temp|; preserves InstanceReg across the call.
void CodeGenerator::callWasmUpdateSuspenderState(
    wasm::UpdateSuspenderStateAction kind, Register suspender, Register temp) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  masm.move32(Imm32(uint32_t(kind)), temp);

  masm.setupWasmABICall(wasm::SymbolicAddress::UpdateSuspenderState);
  masm.passABIArg(InstanceReg);
  masm.passABIArg(suspender);
  masm.passABIArg(temp);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(wasm::BytecodeOffset(0),
                   wasm::SymbolicAddress::UpdateSuspenderState,
                   mozilla::Some(instanceOffset));

  masm.Pop(InstanceReg);
}
  10542 
// Reserves outgoing argument space and passes |suspender| and |data| per the
// wasm ABI (register or stack), then records the caller instance in the
// outgoing area for reload after the call.
void CodeGenerator::prepareWasmStackSwitchTrampolineCall(Register suspender,
                                                        Register data) {
  // Reserve stack space for the wasm call.
  unsigned argDecrement;
  {
    ABIArgGenerator abi(ABIKind::Wasm);
    ABIArg arg;
    arg = abi.next(MIRType::Pointer);
    arg = abi.next(MIRType::Pointer);
    argDecrement = StackDecrementForCall(WasmStackAlignment, 0,
                                         abi.stackBytesConsumedSoFar());
  }
  masm.reserveStack(argDecrement);

  // Pass the suspender and data params through the wasm function ABI registers.
  ABIArgGenerator abi(ABIKind::Wasm);
  ABIArg arg;
  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(suspender, arg.gpr());
  } else {
    // On targets without enough argument registers, spill to the stack slot.
    MOZ_ASSERT(arg.kind() == ABIArg::Stack);
    masm.storePtr(suspender,
                  Address(masm.getStackPointer(), arg.offsetFromArgBase()));
  }
  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(data, arg.gpr());
  } else {
    MOZ_ASSERT(arg.kind() == ABIArg::Stack);
    masm.storePtr(data,
                  Address(masm.getStackPointer(), arg.offsetFromArgBase()));
  }

  masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
                                     WasmCallerInstanceOffsetBeforeCall));
}
  10580 #endif  // ENABLE_WASM_JSPI
  10581 
// Switches execution from the main stack to the suspendable stack and calls
// the wasm function |fn| there (JS Promise Integration). Control returns to
// |returnCallsite| on the main stack when the suspendable computation is done.
void CodeGenerator::visitWasmStackSwitchToSuspendable(
    LWasmStackSwitchToSuspendable* lir) {
#ifdef ENABLE_WASM_JSPI
  const Register SuspenderReg = lir->suspender()->toGeneralReg()->reg();
  const Register FnReg = lir->fn()->toGeneralReg()->reg();
  const Register DataReg = lir->data()->toGeneralReg()->reg();

  // Pick a platform-specific scratch register.
#  ifdef JS_CODEGEN_ARM64
  vixl::UseScratchRegisterScope temps(&masm);
  const Register ScratchReg1 = temps.AcquireX().asUnsized();
#  elif defined(JS_CODEGEN_X86)
  const Register ScratchReg1 = ABINonArgReg3;
#  elif defined(JS_CODEGEN_X64)
  const Register ScratchReg1 = ScratchReg;
#  elif defined(JS_CODEGEN_ARM)
  const Register ScratchReg1 = ABINonArgReturnVolatileReg;
#  elif defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64) || \
      defined(JS_CODEGEN_MIPS64)
  UseScratchRegisterScope temps(masm);
  const Register ScratchReg1 = temps.Acquire();
#  else
#    error "NYI: scratch register"
#  endif

  // Preserve the operands across the Enter notification call.
  masm.Push(SuspenderReg);
  masm.Push(FnReg);
  masm.Push(DataReg);

  callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Enter,
                               SuspenderReg, ScratchReg1);
  masm.Pop(DataReg);
  masm.Pop(FnReg);
  masm.Pop(SuspenderReg);

  masm.Push(SuspenderReg);
  int32_t framePushedAtSuspender = masm.framePushed();
  masm.Push(InstanceReg);

  wasm::CallSiteDesc desc(wasm::CallSiteKind::StackSwitch);
  CodeLabel returnCallsite;

  // Aligning stack before trampoline call.
  uint32_t reserve = ComputeByteAlignment(
      masm.framePushed() - sizeof(wasm::Frame), WasmStackAlignment);
  masm.reserveStack(reserve);

  // Switch stacks to suspendable, keep original FP to maintain
  // frames chain between main and suspendable stack segments.
  masm.storeStackPtrToPrivateValue(
      Address(SuspenderReg, wasm::SuspenderObject::offsetOfMainSP()));
  masm.storePrivateValue(
      FramePointer,
      Address(SuspenderReg, wasm::SuspenderObject::offsetOfMainFP()));

  masm.loadStackPtrFromPrivateValue(
      Address(SuspenderReg, wasm::SuspenderObject::offsetOfSuspendableSP()));

  masm.assertStackAlignment(WasmStackAlignment);

  // The FramePointer is not changed for SwitchToSuspendable.
  uint32_t framePushed = masm.framePushed();

  // On different stack, reset framePushed. FramePointer is not valid here.
  masm.setFramePushed(0);

  prepareWasmStackSwitchTrampolineCall(SuspenderReg, DataReg);

  // Get wasm instance pointer for callee.
  size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_INSTANCE_SLOT);
  masm.loadPtr(Address(FnReg, instanceSlotOffset), InstanceReg);

  masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
                                     WasmCalleeInstanceOffsetBeforeCall));
  masm.loadWasmPinnedRegsFromInstance(mozilla::Nothing());

  masm.assertStackAlignment(WasmStackAlignment);

  const Register ReturnAddressReg = ScratchReg1;

  // DataReg is not needed anymore, using it as a scratch register.
  const Register ScratchReg2 = DataReg;

  // Save future of suspendable stack exit frame pointer.
  masm.computeEffectiveAddress(
      Address(masm.getStackPointer(), -int32_t(sizeof(wasm::Frame))),
      ScratchReg2);
  masm.storePrivateValue(
      ScratchReg2, Address(SuspenderReg,
                           wasm::SuspenderObject::offsetOfSuspendableExitFP()));

  // Materialize the return site address so the callee returns to
  // |returnCallsite| below.
  masm.mov(&returnCallsite, ReturnAddressReg);

  // Call wasm function fast.
#  ifdef JS_USE_LINK_REGISTER
#    if defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64) || \
        defined(JS_CODEGEN_MIPS64)
  masm.mov(ReturnAddressReg, ra);
#    else
  masm.mov(ReturnAddressReg, lr);
#    endif
#  else
  masm.Push(ReturnAddressReg);
#  endif
  // Get funcUncheckedCallEntry() from the function's
  // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
  size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
  masm.loadPtr(Address(FnReg, uncheckedEntrySlotOffset), ScratchReg2);
  masm.jump(ScratchReg2);

  // About to use valid FramePointer -- restore framePushed.
  masm.setFramePushed(framePushed);

  // For IsPlausibleStackMapKey check for the following callsite.
  masm.wasmTrapInstruction();

  // Callsite for return from main stack.
  masm.bind(&returnCallsite);
  masm.append(desc, *returnCallsite.target());
  masm.addCodeLabel(returnCallsite);

  masm.assertStackAlignment(WasmStackAlignment);

  markSafepointAt(returnCallsite.target()->offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushed);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::StackSwitch);
  // Rooting SuspenderReg.
  masm.propagateOOM(
      lir->safepoint()->addWasmAnyRefSlot(true, framePushedAtSuspender));

  masm.freeStackTo(framePushed);

  masm.freeStack(reserve);
  masm.Pop(InstanceReg);
  masm.Pop(SuspenderReg);

  masm.switchToWasmInstanceRealm(ScratchReg1, ScratchReg2);

  callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Leave,
                               SuspenderReg, ScratchReg1);
#else
  MOZ_CRASH("NYI");
#endif  // ENABLE_WASM_JSPI
}
  10727 
  10728 void CodeGenerator::visitWasmStackSwitchToMain(LWasmStackSwitchToMain* lir) {
  10729 #ifdef ENABLE_WASM_JSPI
  10730  const Register SuspenderReg = lir->suspender()->toGeneralReg()->reg();
  10731  const Register FnReg = lir->fn()->toGeneralReg()->reg();
  10732  const Register DataReg = lir->data()->toGeneralReg()->reg();
  10733 
  10734 #  ifdef JS_CODEGEN_ARM64
  10735  vixl::UseScratchRegisterScope temps(&masm);
  10736  const Register ScratchReg1 = temps.AcquireX().asUnsized();
  10737 #  elif defined(JS_CODEGEN_X86)
  10738  const Register ScratchReg1 = ABINonArgReg3;
  10739 #  elif defined(JS_CODEGEN_X64)
  10740  const Register ScratchReg1 = ScratchReg;
  10741 #  elif defined(JS_CODEGEN_ARM)
  10742  const Register ScratchReg1 = ABINonArgReturnVolatileReg;
  10743 #  elif defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64) || \
  10744      defined(JS_CODEGEN_MIPS64)
  10745  UseScratchRegisterScope temps(masm);
  10746  const Register ScratchReg1 = temps.Acquire();
  10747 #  else
  10748 #    error "NYI: scratch register"
  10749 #  endif
  10750 
  10751  masm.Push(SuspenderReg);
  10752  masm.Push(FnReg);
  10753  masm.Push(DataReg);
  10754 
  10755  callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Suspend,
  10756                               SuspenderReg, ScratchReg1);
  10757 
  10758  masm.Pop(DataReg);
  10759  masm.Pop(FnReg);
  10760  masm.Pop(SuspenderReg);
  10761 
  10762  masm.Push(SuspenderReg);
  10763  int32_t framePushedAtSuspender = masm.framePushed();
  10764  masm.Push(InstanceReg);
  10765 
  10766  wasm::CallSiteDesc desc(wasm::CallSiteKind::StackSwitch);
  10767  CodeLabel returnCallsite;
  10768 
  10769  // Aligning stack before trampoline call.
  10770  uint32_t reserve = ComputeByteAlignment(
  10771      masm.framePushed() - sizeof(wasm::Frame), WasmStackAlignment);
  10772  masm.reserveStack(reserve);
  10773 
  10774  masm.movePtr(SuspenderReg, SuspenderReg);
  10775 
  10776  // Switch stacks to main.
  10777  masm.storeStackPtrToPrivateValue(
  10778      Address(SuspenderReg, wasm::SuspenderObject::offsetOfSuspendableSP()));
  10779  masm.storePrivateValue(
  10780      FramePointer,
  10781      Address(SuspenderReg, wasm::SuspenderObject::offsetOfSuspendableFP()));
  10782 
  10783  masm.loadStackPtrFromPrivateValue(
  10784      Address(SuspenderReg, wasm::SuspenderObject::offsetOfMainSP()));
  10785  masm.loadPrivate(
  10786      Address(SuspenderReg, wasm::SuspenderObject::offsetOfMainFP()),
  10787      FramePointer);
  10788 
  10789  // Set main_ra field to returnCallsite.
  10790  masm.mov(&returnCallsite, ScratchReg1);
  10791  masm.storePrivateValue(
  10792      ScratchReg1,
  10793      Address(SuspenderReg,
  10794              wasm::SuspenderObject::offsetOfSuspendedReturnAddress()));
  10795 
  10796  masm.assertStackAlignment(WasmStackAlignment);
  10797 
  10798  // The FramePointer is pointing to the same
  10799  // place as before switch happened.
  10800  uint32_t framePushed = masm.framePushed();
  10801 
  10802  // On different stack, reset framePushed. FramePointer is not valid here.
  10803  masm.setFramePushed(0);
  10804 
  10805  prepareWasmStackSwitchTrampolineCall(SuspenderReg, DataReg);
  10806 
  10807  // Get wasm instance pointer for callee.
  10808  size_t instanceSlotOffset = FunctionExtended::offsetOfExtendedSlot(
  10809      FunctionExtended::WASM_INSTANCE_SLOT);
  10810  masm.loadPtr(Address(FnReg, instanceSlotOffset), InstanceReg);
  10811 
  10812  masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
  10813                                     WasmCalleeInstanceOffsetBeforeCall));
  10814  masm.loadWasmPinnedRegsFromInstance(mozilla::Nothing());
  10815 
  10816  masm.assertStackAlignment(WasmStackAlignment);
  10817 
  10818  const Register ReturnAddressReg = ScratchReg1;
  10819  // DataReg is not needed anymore, using it as a scratch register.
  10820  const Register ScratchReg2 = DataReg;
  10821 
  10822  // Save future of main stack exit frame pointer.
  10823  masm.computeEffectiveAddress(
  10824      Address(masm.getStackPointer(), -int32_t(sizeof(wasm::Frame))),
  10825      ScratchReg2);
  10826  masm.storePrivateValue(
  10827      ScratchReg2,
  10828      Address(SuspenderReg, wasm::SuspenderObject::offsetOfMainExitFP()));
  10829 
  10830  // Load InstanceReg from suspendable stack exit frame.
  10831  masm.loadPrivate(
  10832      Address(SuspenderReg, wasm::SuspenderObject::offsetOfSuspendableExitFP()),
  10833      ScratchReg2);
  10834  masm.loadPtr(
  10835      Address(ScratchReg2, wasm::FrameWithInstances::callerInstanceOffset()),
  10836      ScratchReg2);
  10837  masm.storePtr(ScratchReg2, Address(masm.getStackPointer(),
  10838                                     WasmCallerInstanceOffsetBeforeCall));
  10839 
  10840  // Load RA from suspendable stack exit frame.
  10841  masm.loadPrivate(
  10842      Address(SuspenderReg, wasm::SuspenderObject::offsetOfSuspendableExitFP()),
  10843      ScratchReg1);
  10844  masm.loadPtr(Address(ScratchReg1, wasm::Frame::returnAddressOffset()),
  10845               ReturnAddressReg);
  10846 
  10847  // Call wasm function fast.
  10848 #  ifdef JS_USE_LINK_REGISTER
  10849 #    if defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64) || \
  10850        defined(JS_CODEGEN_MIPS64)
  10851  masm.mov(ReturnAddressReg, ra);
  10852 #    else
  10853  masm.mov(ReturnAddressReg, lr);
  10854 #    endif
  10855 #  else
  10856  masm.Push(ReturnAddressReg);
  10857 #  endif
  10858  // Get funcUncheckedCallEntry() from the function's
  10859  // WASM_FUNC_UNCHECKED_ENTRY_SLOT extended slot.
  10860  size_t uncheckedEntrySlotOffset = FunctionExtended::offsetOfExtendedSlot(
  10861      FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT);
  10862  masm.loadPtr(Address(FnReg, uncheckedEntrySlotOffset), ScratchReg2);
  10863  masm.jump(ScratchReg2);
  10864 
  10865  // About to use valid FramePointer -- restore framePushed.
  10866  masm.setFramePushed(framePushed);
  10867 
  10868  // For IsPlausibleStackMapKey check for the following callsite.
  10869  masm.wasmTrapInstruction();
  10870 
  10871  // Callsite for return from suspendable stack.
  10872  masm.bind(&returnCallsite);
  10873  masm.append(desc, *returnCallsite.target());
  10874  masm.addCodeLabel(returnCallsite);
  10875 
  10876  masm.assertStackAlignment(WasmStackAlignment);
  10877 
  10878  markSafepointAt(returnCallsite.target()->offset(), lir);
  10879  lir->safepoint()->setFramePushedAtStackMapBase(framePushed);
  10880  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::StackSwitch);
  10881  // Rooting SuspenderReg.
  10882  masm.propagateOOM(
  10883      lir->safepoint()->addWasmAnyRefSlot(true, framePushedAtSuspender));
  10884 
  10885  masm.freeStackTo(framePushed);
  10886 
  10887  // Push ReturnReg that is passed from ContinueOnSuspended on the stack after,
  10888  // the SuspenderReg has been restored (see ScratchReg1 push below).
  10889  // (On some platforms SuspenderReg == ReturnReg)
  10890  masm.mov(ReturnReg, ScratchReg1);
  10891 
  10892  masm.freeStack(reserve);
  10893  masm.Pop(InstanceReg);
  10894  masm.Pop(SuspenderReg);
  10895 
  10896  masm.Push(ScratchReg1);
  10897 
  10898  masm.switchToWasmInstanceRealm(ScratchReg1, ScratchReg2);
  10899 
  10900  callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Resume,
  10901                               SuspenderReg, ScratchReg1);
  10902 
  10903  masm.Pop(ToRegister(lir->output()));
  10904 
  10905 #else
  10906  MOZ_CRASH("NYI");
  10907 #endif  // ENABLE_WASM_JSPI
  10908 }
  10909 
void CodeGenerator::visitWasmStackContinueOnSuspendable(
    LWasmStackContinueOnSuspendable* lir) {
#ifdef ENABLE_WASM_JSPI
  // Switches from the main stack back onto the suspendable stack and resumes
  // execution there by jumping to the return address stored in the suspender
  // at suspend time.  Control comes back to this code at `returnCallsite`
  // when the suspendable stack later switches back to main.
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  Register suspender = ToRegister(lir->suspender());
  Register result = ToRegister(lir->result());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());

  // Keep the suspender and instance alive across the switch; the suspender
  // slot is registered in the stack map below.
  masm.Push(suspender);
  int32_t framePushedAtSuspender = masm.framePushed();
  masm.Push(InstanceReg);

  wasm::CallSiteDesc desc(wasm::CallSiteKind::StackSwitch);
  CodeLabel returnCallsite;

  // Aligning stack before trampoline call.
  uint32_t reserve = ComputeByteAlignment(
      masm.framePushed() - sizeof(wasm::Frame), WasmStackAlignment);
  masm.reserveStack(reserve);

  // Save the main stack's SP and FP into the suspender object.
  masm.storeStackPtrToPrivateValue(
      Address(suspender, wasm::SuspenderObject::offsetOfMainSP()));
  masm.storePrivateValue(
      FramePointer,
      Address(suspender, wasm::SuspenderObject::offsetOfMainFP()));

  // Adjust exit frame FP.
  masm.loadPrivate(
      Address(suspender, wasm::SuspenderObject::offsetOfSuspendableExitFP()),
      temp1);
  masm.storePtr(FramePointer, Address(temp1, wasm::Frame::callerFPOffset()));

  // Adjust exit frame RA.
  masm.mov(&returnCallsite, temp2);

  masm.storePtr(temp2, Address(temp1, wasm::Frame::returnAddressOffset()));
  // Adjust exit frame caller instance slot.
  masm.storePtr(
      InstanceReg,
      Address(temp1, wasm::FrameWithInstances::callerInstanceOffset()));

  // Switch stacks to suspendable.
  masm.loadStackPtrFromPrivateValue(
      Address(suspender, wasm::SuspenderObject::offsetOfSuspendableSP()));
  masm.loadPrivate(
      Address(suspender, wasm::SuspenderObject::offsetOfSuspendableFP()),
      FramePointer);

  masm.assertStackAlignment(WasmStackAlignment);

  // The FramePointer is pointing to the same
  // place as before switch happened.
  uint32_t framePushed = masm.framePushed();

  // On different stack, reset framePushed. FramePointer is not valid here.
  masm.setFramePushed(0);

  // Restore shadow stack area and instance slots.
  ABIArgGenerator abi(ABIKind::Wasm);
  unsigned reserveBeforeCall = abi.stackBytesConsumedSoFar();
  MOZ_ASSERT(masm.framePushed() == 0);
  unsigned argDecrement =
      StackDecrementForCall(WasmStackAlignment, 0, reserveBeforeCall);
  masm.reserveStack(argDecrement);

  masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
                                     WasmCallerInstanceOffsetBeforeCall));
  masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
                                     WasmCalleeInstanceOffsetBeforeCall));

  masm.assertStackAlignment(WasmStackAlignment);

  // Transfer results to ReturnReg so it will appear at SwitchToMain return.
  // temp2 is fixed to be the ReturnReg, and so use it here.
  MOZ_ASSERT(temp2 == ReturnReg);
  masm.mov(result, temp2);

  // Pretend we just returned from the function.
  masm.loadPrivate(
      Address(suspender,
              wasm::SuspenderObject::offsetOfSuspendedReturnAddress()),
      temp1);
  masm.jump(temp1);

  // About to use valid FramePointer -- restore framePushed.
  masm.setFramePushed(framePushed);

  // For IsPlausibleStackMapKey check for the following callsite.
  masm.wasmTrapInstruction();

  // Callsite for return from suspendable stack.
  masm.bind(&returnCallsite);
  masm.append(desc, *returnCallsite.target());
  masm.addCodeLabel(returnCallsite);

  masm.assertStackAlignment(WasmStackAlignment);

  markSafepointAt(returnCallsite.target()->offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushed);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::StackSwitch);
  // Rooting SuspenderReg.
  masm.propagateOOM(
      lir->safepoint()->addWasmAnyRefSlot(true, framePushedAtSuspender));

  masm.freeStackTo(framePushed);

  masm.freeStack(reserve);
  masm.Pop(InstanceReg);
  masm.Pop(suspender);

  masm.switchToWasmInstanceRealm(temp1, temp2);

  callWasmUpdateSuspenderState(wasm::UpdateSuspenderStateAction::Leave,
                               suspender, temp1);
#else
  MOZ_CRASH("NYI");
#endif  // ENABLE_WASM_JSPI
}
  11029 
  11030 void CodeGenerator::visitWasmCallLandingPrePad(LWasmCallLandingPrePad* lir) {
  11031  LBlock* block = lir->block();
  11032  MWasmCallLandingPrePad* mir = lir->mir();
  11033  MBasicBlock* mirBlock = mir->block();
  11034  MBasicBlock* callMirBlock = mir->callBlock();
  11035 
  11036  // This block must be the pre-pad successor of the call block. No blocks may
  11037  // be inserted between us, such as for critical edge splitting.
  11038  MOZ_RELEASE_ASSERT(mirBlock == callMirBlock->getSuccessor(
  11039                                     MWasmCallCatchable::PrePadBranchIndex));
  11040 
  11041  // This instruction or a move group must be the first instruction in the
  11042  // block. No other instructions may be inserted.
  11043  MOZ_RELEASE_ASSERT(*block->begin() == lir || (block->begin()->isMoveGroup() &&
  11044                                                *(++block->begin()) == lir));
  11045 
  11046  wasm::TryNoteVector& tryNotes = masm.tryNotes();
  11047  wasm::TryNote& tryNote = tryNotes[mir->tryNoteIndex()];
  11048  // Set the entry point for the call try note to be the beginning of this
  11049  // block. The above assertions (and assertions in visitWasmCall) guarantee
  11050  // that we are not skipping over instructions that should be executed.
  11051  tryNote.setLandingPad(block->label()->offset(), masm.framePushed());
  11052 }
  11053 
  11054 void CodeGenerator::visitWasmCallIndirectAdjunctSafepoint(
  11055    LWasmCallIndirectAdjunctSafepoint* lir) {
  11056  markSafepointAt(lir->safepointLocation().offset(), lir);
  11057  lir->safepoint()->setFramePushedAtStackMapBase(
  11058      lir->framePushedAtStackMapBase());
  11059 }
  11060 
  11061 template <typename InstructionWithMaybeTrapSite>
  11062 void EmitSignalNullCheckTrapSite(MacroAssembler& masm,
  11063                                 InstructionWithMaybeTrapSite* ins,
  11064                                 FaultingCodeOffset fco,
  11065                                 wasm::TrapMachineInsn tmi) {
  11066  if (!ins->maybeTrap()) {
  11067    return;
  11068  }
  11069  masm.append(wasm::Trap::NullPointerDereference, tmi, fco.get(),
  11070              *ins->maybeTrap());
  11071 }
  11072 
  11073 template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndexT>
  11074 void CodeGenerator::emitWasmValueLoad(InstructionWithMaybeTrapSite* ins,
  11075                                      MIRType type, MWideningOp wideningOp,
  11076                                      AddressOrBaseIndexT addr,
  11077                                      AnyRegister dst) {
  11078  FaultingCodeOffset fco;
  11079  switch (type) {
  11080    case MIRType::Int32:
  11081      switch (wideningOp) {
  11082        case MWideningOp::None:
  11083          fco = masm.load32(addr, dst.gpr());
  11084          EmitSignalNullCheckTrapSite(masm, ins, fco,
  11085                                      wasm::TrapMachineInsn::Load32);
  11086          break;
  11087        case MWideningOp::FromU16:
  11088          fco = masm.load16ZeroExtend(addr, dst.gpr());
  11089          EmitSignalNullCheckTrapSite(masm, ins, fco,
  11090                                      wasm::TrapMachineInsn::Load16);
  11091          break;
  11092        case MWideningOp::FromS16:
  11093          fco = masm.load16SignExtend(addr, dst.gpr());
  11094          EmitSignalNullCheckTrapSite(masm, ins, fco,
  11095                                      wasm::TrapMachineInsn::Load16);
  11096          break;
  11097        case MWideningOp::FromU8:
  11098          fco = masm.load8ZeroExtend(addr, dst.gpr());
  11099          EmitSignalNullCheckTrapSite(masm, ins, fco,
  11100                                      wasm::TrapMachineInsn::Load8);
  11101          break;
  11102        case MWideningOp::FromS8:
  11103          fco = masm.load8SignExtend(addr, dst.gpr());
  11104          EmitSignalNullCheckTrapSite(masm, ins, fco,
  11105                                      wasm::TrapMachineInsn::Load8);
  11106          break;
  11107        default:
  11108          MOZ_CRASH("unexpected widening op in ::visitWasmLoadElement");
  11109      }
  11110      break;
  11111    case MIRType::Float32:
  11112      MOZ_ASSERT(wideningOp == MWideningOp::None);
  11113      fco = masm.loadFloat32(addr, dst.fpu());
  11114      EmitSignalNullCheckTrapSite(masm, ins, fco,
  11115                                  wasm::TrapMachineInsn::Load32);
  11116      break;
  11117    case MIRType::Double:
  11118      MOZ_ASSERT(wideningOp == MWideningOp::None);
  11119      fco = masm.loadDouble(addr, dst.fpu());
  11120      EmitSignalNullCheckTrapSite(masm, ins, fco,
  11121                                  wasm::TrapMachineInsn::Load64);
  11122      break;
  11123    case MIRType::Pointer:
  11124    case MIRType::WasmAnyRef:
  11125    case MIRType::WasmStructData:
  11126    case MIRType::WasmArrayData:
  11127      MOZ_ASSERT(wideningOp == MWideningOp::None);
  11128      fco = masm.loadPtr(addr, dst.gpr());
  11129      EmitSignalNullCheckTrapSite(masm, ins, fco,
  11130                                  wasm::TrapMachineInsnForLoadWord());
  11131      break;
  11132    default:
  11133      MOZ_CRASH("unexpected type in ::emitWasmValueLoad");
  11134  }
  11135 }
  11136 
template <typename InstructionWithMaybeTrapSite, class AddressOrBaseIndexT>
void CodeGenerator::emitWasmValueStore(InstructionWithMaybeTrapSite* ins,
                                       MIRType type, MNarrowingOp narrowingOp,
                                       AnyRegister src,
                                       AddressOrBaseIndexT addr) {
  // Emit a store of `type` from `src` to `addr`, applying `narrowingOp` for
  // sub-word i32 stores.  Every store also registers a null-dereference trap
  // site when the instruction carries trap metadata.
  FaultingCodeOffset fco;
  switch (type) {
    case MIRType::Int32:
      switch (narrowingOp) {
        case MNarrowingOp::None:
          fco = masm.store32(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store32);
          break;
        case MNarrowingOp::To16:
          fco = masm.store16(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store16);
          break;
        case MNarrowingOp::To8:
          fco = masm.store8(src.gpr(), addr);
          EmitSignalNullCheckTrapSite(masm, ins, fco,
                                      wasm::TrapMachineInsn::Store8);
          break;
        default:
          MOZ_CRASH();
      }
      break;
    case MIRType::Float32:
      fco = masm.storeFloat32(src.fpu(), addr);
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Store32);
      break;
    case MIRType::Double:
      fco = masm.storeDouble(src.fpu(), addr);
      EmitSignalNullCheckTrapSite(masm, ins, fco,
                                  wasm::TrapMachineInsn::Store64);
      break;
    case MIRType::Pointer:
      // This could be correct, but it would be a new usage, so check carefully.
      MOZ_CRASH("Unexpected type in ::emitWasmValueStore.");
    case MIRType::WasmAnyRef:
      // Ref stores need GC barriers, which this helper does not emit.
      MOZ_CRASH("Bad type in ::emitWasmValueStore. Use LWasmStoreElementRef.");
    default:
      MOZ_CRASH("unexpected type in ::emitWasmValueStore");
  }
}
  11184 
  11185 void CodeGenerator::visitWasmLoadSlot(LWasmLoadSlot* ins) {
  11186  MIRType type = ins->type();
  11187  MWideningOp wideningOp = ins->wideningOp();
  11188  Register container = ToRegister(ins->containerRef());
  11189  Address addr(container, ins->offset());
  11190  AnyRegister dst = ToAnyRegister(ins->output());
  11191 
  11192 #ifdef ENABLE_WASM_SIMD
  11193  if (type == MIRType::Simd128) {
  11194    MOZ_ASSERT(wideningOp == MWideningOp::None);
  11195    FaultingCodeOffset fco = masm.loadUnalignedSimd128(addr, dst.fpu());
  11196    EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
  11197    return;
  11198  }
  11199 #endif
  11200  emitWasmValueLoad(ins, type, wideningOp, addr, dst);
  11201 }
  11202 
  11203 void CodeGenerator::visitWasmLoadElement(LWasmLoadElement* ins) {
  11204  MIRType type = ins->type();
  11205  MWideningOp wideningOp = ins->wideningOp();
  11206  Scale scale = ins->scale();
  11207  Register base = ToRegister(ins->base());
  11208  Register index = ToRegister(ins->index());
  11209  AnyRegister dst = ToAnyRegister(ins->output());
  11210 
  11211 #ifdef ENABLE_WASM_SIMD
  11212  if (type == MIRType::Simd128) {
  11213    MOZ_ASSERT(wideningOp == MWideningOp::None);
  11214    FaultingCodeOffset fco;
  11215    Register temp = ToRegister(ins->temp0());
  11216    masm.lshiftPtr(Imm32(4), index, temp);
  11217    fco = masm.loadUnalignedSimd128(BaseIndex(base, temp, Scale::TimesOne),
  11218                                    dst.fpu());
  11219    EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load128);
  11220    return;
  11221  }
  11222 #endif
  11223  emitWasmValueLoad(ins, type, wideningOp, BaseIndex(base, index, scale), dst);
  11224 }
  11225 
  11226 void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
  11227  MIRType type = ins->type();
  11228  MNarrowingOp narrowingOp = ins->narrowingOp();
  11229  Register container = ToRegister(ins->containerRef());
  11230  Address addr(container, ins->offset());
  11231  AnyRegister src = ToAnyRegister(ins->value());
  11232  if (type != MIRType::Int32) {
  11233    MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
  11234  }
  11235 
  11236 #ifdef ENABLE_WASM_SIMD
  11237  if (type == MIRType::Simd128) {
  11238    FaultingCodeOffset fco = masm.storeUnalignedSimd128(src.fpu(), addr);
  11239    EmitSignalNullCheckTrapSite(masm, ins, fco,
  11240                                wasm::TrapMachineInsn::Store128);
  11241    return;
  11242  }
  11243 #endif
  11244  emitWasmValueStore(ins, type, narrowingOp, src, addr);
  11245 }
  11246 
  11247 void CodeGenerator::visitWasmStoreStackResult(LWasmStoreStackResult* ins) {
  11248  const LAllocation* value = ins->value();
  11249  Address addr(ToRegister(ins->stackResultsArea()), ins->offset());
  11250 
  11251  switch (ins->type()) {
  11252    case MIRType::Int32:
  11253      masm.storePtr(ToRegister(value), addr);
  11254      break;
  11255    case MIRType::Float32:
  11256      masm.storeFloat32(ToFloatRegister(value), addr);
  11257      break;
  11258    case MIRType::Double:
  11259      masm.storeDouble(ToFloatRegister(value), addr);
  11260      break;
  11261 #ifdef ENABLE_WASM_SIMD
  11262    case MIRType::Simd128:
  11263      masm.storeUnalignedSimd128(ToFloatRegister(value), addr);
  11264      break;
  11265 #endif
  11266    case MIRType::WasmAnyRef:
  11267      masm.storePtr(ToRegister(value), addr);
  11268      break;
  11269    default:
  11270      MOZ_CRASH("unexpected type in ::visitWasmStoreStackResult");
  11271  }
  11272 }
  11273 
  11274 void CodeGenerator::visitWasmStoreStackResultI64(
  11275    LWasmStoreStackResultI64* ins) {
  11276  masm.store64(ToRegister64(ins->value()),
  11277               Address(ToRegister(ins->stackResultsArea()), ins->offset()));
  11278 }
  11279 
  11280 void CodeGenerator::visitWasmStoreElement(LWasmStoreElement* ins) {
  11281  MIRType type = ins->type();
  11282  MNarrowingOp narrowingOp = ins->narrowingOp();
  11283  Scale scale = ins->scale();
  11284  Register base = ToRegister(ins->base());
  11285  Register index = ToRegister(ins->index());
  11286  AnyRegister src = ToAnyRegister(ins->value());
  11287  if (type != MIRType::Int32) {
  11288    MOZ_RELEASE_ASSERT(narrowingOp == MNarrowingOp::None);
  11289  }
  11290 
  11291 #ifdef ENABLE_WASM_SIMD
  11292  if (type == MIRType::Simd128) {
  11293    Register temp = ToRegister(ins->temp0());
  11294    masm.lshiftPtr(Imm32(4), index, temp);
  11295    FaultingCodeOffset fco = masm.storeUnalignedSimd128(
  11296        src.fpu(), BaseIndex(base, temp, Scale::TimesOne));
  11297    EmitSignalNullCheckTrapSite(masm, ins, fco,
  11298                                wasm::TrapMachineInsn::Store128);
  11299    return;
  11300  }
  11301 #endif
  11302  emitWasmValueStore(ins, type, narrowingOp, src,
  11303                     BaseIndex(base, index, scale));
  11304 }
  11305 
  11306 void CodeGenerator::visitWasmLoadTableElement(LWasmLoadTableElement* ins) {
  11307  Register elements = ToRegister(ins->elements());
  11308  Register index = ToRegister(ins->index());
  11309  Register output = ToRegister(ins->output());
  11310  masm.loadPtr(BaseIndex(elements, index, ScalePointer), output);
  11311 }
  11312 
  11313 void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
  11314  masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
  11315  masm.addPtr(Imm32(int32_t(ins->mir()->offset())), ToRegister(ins->output()));
  11316 }
  11317 
  11318 void CodeGenerator::visitWasmDerivedIndexPointer(
  11319    LWasmDerivedIndexPointer* ins) {
  11320  Register base = ToRegister(ins->base());
  11321  Register index = ToRegister(ins->index());
  11322  Register output = ToRegister(ins->output());
  11323  masm.computeEffectiveAddress(BaseIndex(base, index, ins->mir()->scale()),
  11324                               output);
  11325 }
  11326 
void CodeGenerator::visitWasmStoreRef(LWasmStoreRef* ins) {
  // Store a GC reference at a fixed offset from `valueBase`, emitting the GC
  // pre-barrier for the overwritten value when requested.
  Register instance = ToRegister(ins->instance());
  Register valueBase = ToRegister(ins->valueBase());
  size_t offset = ins->offset();
  Register value = ToRegister(ins->value());
  Register temp = ToRegister(ins->temp0());

  if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
    // The guard branches to skipPreBarrier when the barrier call is not
    // needed for the value currently stored at the address.
    Label skipPreBarrier;
    wasm::EmitWasmPreBarrierGuard(masm, instance, temp,
                                  Address(valueBase, offset), &skipPreBarrier,
                                  ins->maybeTrap());
    wasm::EmitWasmPreBarrierCallImmediate(masm, instance, temp, valueBase,
                                          offset);
    masm.bind(&skipPreBarrier);
  }

  // The store itself may fault on a null base; register a trap site for it.
  FaultingCodeOffset fco = masm.storePtr(value, Address(valueBase, offset));
  EmitSignalNullCheckTrapSite(masm, ins, fco,
                              wasm::TrapMachineInsnForStoreWord());
  // The postbarrier is handled separately.
}
  11349 
void CodeGenerator::visitWasmStoreElementRef(LWasmStoreElementRef* ins) {
  // Store a GC reference at base + index * sizeof(void*), emitting the GC
  // pre-barrier for the overwritten value when requested.
  Register instance = ToRegister(ins->instance());
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  Register value = ToRegister(ins->value());
  Register temp0 = ToTempRegisterOrInvalid(ins->temp0());
  Register temp1 = ToTempRegisterOrInvalid(ins->temp1());

  BaseIndex addr(base, index, ScalePointer);

  if (ins->preBarrierKind() == WasmPreBarrierKind::Normal) {
    // The guard branches to skipPreBarrier when the barrier call is not
    // needed for the value currently stored at the address.
    Label skipPreBarrier;
    wasm::EmitWasmPreBarrierGuard(masm, instance, temp0, addr, &skipPreBarrier,
                                  ins->maybeTrap());
    wasm::EmitWasmPreBarrierCallIndex(masm, instance, temp0, temp1, addr);
    masm.bind(&skipPreBarrier);
  }

  // The store itself may fault on a null base; register a trap site for it.
  FaultingCodeOffset fco = masm.storePtr(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fco,
                              wasm::TrapMachineInsnForStoreWord());
  // The postbarrier is handled separately.
}
  11373 
void CodeGenerator::visitWasmPostWriteBarrierWholeCell(
    LWasmPostWriteBarrierWholeCell* lir) {
  // Post-write barrier for a whole cell: when the inline guard cannot rule
  // out the need for a barrier, call Instance::postBarrierWholeCell out of
  // line to record `object` in the store buffer.
  Register object = ToRegister(lir->object());
  Register value = ToRegister(lir->value());
  Register temp = ToRegister(lir->temp0());
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    // Skip the barrier if this object was previously added to the store buffer.
    // We perform this check out of line because in practice the prior guards
    // eliminate most calls to the barrier.
    wasm::CheckWholeCellLastElementCache(masm, InstanceReg, object, temp,
                                         ool.rejoin());

    // Registers live across this LIR op must survive the ABI call below.
    saveLive(lir);
    masm.Push(InstanceReg);
    int32_t framePushedAfterInstance = masm.framePushed();

    // Call Instance::postBarrierWholeCell
    masm.setupWasmABICall(wasm::SymbolicAddress::PostBarrierWholeCell);
    masm.passABIArg(InstanceReg);
    masm.passABIArg(object);
    int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
    masm.callWithABI(wasm::BytecodeOffset(0),
                     wasm::SymbolicAddress::PostBarrierWholeCell,
                     mozilla::Some(instanceOffset), ABIType::General);

    masm.Pop(InstanceReg);
    restoreLive(lir);

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  // Inline fast path: jump straight to rejoin when no barrier is needed.
  wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
                                 ool->rejoin());
  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
  11412 
void CodeGenerator::visitWasmPostWriteBarrierEdgeAtIndex(
    LWasmPostWriteBarrierEdgeAtIndex* lir) {
  // Post-write barrier for a store into valueBase[index]: when the inline
  // guard cannot rule out the need for a barrier, call
  // Instance::postBarrierEdge out of line with the address of the edge.
  Register object = ToRegister(lir->object());
  Register value = ToRegister(lir->value());
  Register valueBase = ToRegister(lir->valueBase());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    // Registers live across this LIR op must survive the ABI call below.
    saveLive(lir);
    masm.Push(InstanceReg);
    int32_t framePushedAfterInstance = masm.framePushed();

    // Fold the value offset into the value base
    if (lir->elemSize() == 16) {
      // 16-byte elements have no matching Scale; shift the index manually.
      masm.lshiftPtr(Imm32(4), index, temp);
      masm.addPtr(valueBase, temp);
    } else {
      masm.computeEffectiveAddress(
          BaseIndex(valueBase, index, ScaleFromElemWidth(lir->elemSize())),
          temp);
    }

    // Call Instance::postBarrier
    masm.setupWasmABICall(wasm::SymbolicAddress::PostBarrierEdge);
    masm.passABIArg(InstanceReg);
    masm.passABIArg(temp);
    int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
    masm.callWithABI(wasm::BytecodeOffset(0),
                     wasm::SymbolicAddress::PostBarrierEdge,
                     mozilla::Some(instanceOffset), ABIType::General);

    masm.Pop(InstanceReg);
    restoreLive(lir);

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  // Inline fast path: jump straight to rejoin when no barrier is needed.
  wasm::EmitWasmPostBarrierGuard(masm, mozilla::Some(object), temp, value,
                                 ool->rejoin());
  masm.jump(ool->entry());
  masm.bind(ool->rejoin());
}
  11457 
  11458 void CodeGenerator::visitWasmLoadSlotI64(LWasmLoadSlotI64* ins) {
  11459  Register container = ToRegister(ins->containerRef());
  11460  Address addr(container, ins->offset());
  11461  Register64 output = ToOutRegister64(ins);
  11462  // Either 1 or 2 words.  On a 32-bit target, it is hard to argue that one
  11463  // transaction will always trap before the other, so it seems safest to
  11464  // register both of them as potentially trapping.
  11465 #ifdef JS_64BIT
  11466  FaultingCodeOffset fco = masm.load64(addr, output);
  11467  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
  11468 #else
  11469  FaultingCodeOffsetPair fcop = masm.load64(addr, output);
  11470  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
  11471                              wasm::TrapMachineInsn::Load32);
  11472  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
  11473                              wasm::TrapMachineInsn::Load32);
  11474 #endif
  11475 }
  11476 
// Load a 64-bit element at |base + index * 8|. As with the slot variant, the
// access may fault on a null base, so the load(s) are registered as trap
// sites.
void CodeGenerator::visitWasmLoadElementI64(LWasmLoadElementI64* ins) {
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  BaseIndex addr(base, index, Scale::TimesEight);
  Register64 output = ToOutRegister64(ins);
  // Either 1 or 2 words.  On a 32-bit target, it is hard to argue that one
  // transaction will always trap before the other, so it seems safest to
  // register both of them as potentially trapping.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Load64);
#else
  // Two 32-bit halves on 32-bit targets; both can fault.
  FaultingCodeOffsetPair fcop = masm.load64(addr, output);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Load32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Load32);
#endif
}
  11496 
// Store a 64-bit value to a fixed offset inside a wasm container object.
// The container reference may be null, so the store is registered as a
// potential signal-handler trap site.
void CodeGenerator::visitWasmStoreSlotI64(LWasmStoreSlotI64* ins) {
  Register container = ToRegister(ins->containerRef());
  Address addr(container, ins->offset());
  Register64 value = ToRegister64(ins->value());
  // Either 1 or 2 words.  As above we register both transactions in the
  // 2-word case.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
#else
  FaultingCodeOffsetPair fcop = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Store32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Store32);
#endif
}
  11514 
// Store a 64-bit element at |base + index * 8|, registering the store(s) as
// potential null-check trap sites.
void CodeGenerator::visitWasmStoreElementI64(LWasmStoreElementI64* ins) {
  Register base = ToRegister(ins->base());
  Register index = ToRegister(ins->index());
  BaseIndex addr(base, index, Scale::TimesEight);
  Register64 value = ToRegister64(ins->value());
  // Either 1 or 2 words.  As above we register both transactions in the
  // 2-word case.
#ifdef JS_64BIT
  FaultingCodeOffset fco = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fco, wasm::TrapMachineInsn::Store64);
#else
  FaultingCodeOffsetPair fcop = masm.store64(value, addr);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.first,
                              wasm::TrapMachineInsn::Store32);
  EmitSignalNullCheckTrapSite(masm, ins, fcop.second,
                              wasm::TrapMachineInsn::Store32);
#endif
}
  11533 
  11534 void CodeGenerator::visitWasmClampTable64Address(
  11535    LWasmClampTable64Address* lir) {
  11536  Register64 address = ToRegister64(lir->address());
  11537  Register out = ToRegister(lir->output());
  11538  masm.wasmClampTable64Address(address, out);
  11539 }
  11540 
  11541 void CodeGenerator::visitArrayBufferByteLength(LArrayBufferByteLength* lir) {
  11542  Register obj = ToRegister(lir->object());
  11543  Register out = ToRegister(lir->output());
  11544  masm.loadArrayBufferByteLengthIntPtr(obj, out);
  11545 }
  11546 
  11547 void CodeGenerator::visitArrayBufferViewLength(LArrayBufferViewLength* lir) {
  11548  Register obj = ToRegister(lir->object());
  11549  Register out = ToRegister(lir->output());
  11550  masm.loadArrayBufferViewLengthIntPtr(obj, out);
  11551 }
  11552 
  11553 void CodeGenerator::visitArrayBufferViewByteOffset(
  11554    LArrayBufferViewByteOffset* lir) {
  11555  Register obj = ToRegister(lir->object());
  11556  Register out = ToRegister(lir->output());
  11557  masm.loadArrayBufferViewByteOffsetIntPtr(obj, out);
  11558 }
  11559 
  11560 void CodeGenerator::visitArrayBufferViewElements(
  11561    LArrayBufferViewElements* lir) {
  11562  Register obj = ToRegister(lir->object());
  11563  Register out = ToRegister(lir->output());
  11564  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), out);
  11565 }
  11566 
// Load an ArrayBufferView's data pointer and advance it by |offset| elements
// (scaled by the element type's size), producing the effective element
// address in the output register.
void CodeGenerator::visitArrayBufferViewElementsWithOffset(
    LArrayBufferViewElementsWithOffset* lir) {
  Register object = ToRegister(lir->object());
  Register out = ToRegister(lir->output());
  Scalar::Type elementType = lir->mir()->elementType();

  // Start from the raw data pointer.
  masm.loadPtr(Address(object, ArrayBufferViewObject::dataOffset()), out);

  if (lir->offset()->isConstant()) {
    // Constant offset: fold it into a displacement; skip the add entirely
    // when the scaled displacement is zero.
    Address source = ToAddress(out, lir->offset(), elementType);
    if (source.offset != 0) {
      masm.computeEffectiveAddress(source, out);
    }
  } else {
    // Dynamic offset: scale the index register by the element size.
    BaseIndex source(out, ToRegister(lir->offset()),
                     ScaleFromScalarType(elementType));
    masm.computeEffectiveAddress(source, out);
  }
}
  11586 
  11587 void CodeGenerator::visitTypedArrayElementSize(LTypedArrayElementSize* lir) {
  11588  Register obj = ToRegister(lir->object());
  11589  Register out = ToRegister(lir->output());
  11590 
  11591  masm.typedArrayElementSize(obj, out);
  11592 }
  11593 
  11594 void CodeGenerator::visitResizableTypedArrayLength(
  11595    LResizableTypedArrayLength* lir) {
  11596  Register obj = ToRegister(lir->object());
  11597  Register out = ToRegister(lir->output());
  11598  Register temp = ToRegister(lir->temp0());
  11599 
  11600  auto sync = SynchronizeLoad(lir->mir()->requiresMemoryBarrier());
  11601  masm.loadResizableTypedArrayLengthIntPtr(sync, obj, out, temp);
  11602 }
  11603 
  11604 void CodeGenerator::visitResizableDataViewByteLength(
  11605    LResizableDataViewByteLength* lir) {
  11606  Register obj = ToRegister(lir->object());
  11607  Register out = ToRegister(lir->output());
  11608  Register temp = ToRegister(lir->temp0());
  11609 
  11610  auto sync = SynchronizeLoad(lir->mir()->requiresMemoryBarrier());
  11611  masm.loadResizableDataViewByteLengthIntPtr(sync, obj, out, temp);
  11612 }
  11613 
  11614 void CodeGenerator::visitGrowableSharedArrayBufferByteLength(
  11615    LGrowableSharedArrayBufferByteLength* lir) {
  11616  Register obj = ToRegister(lir->object());
  11617  Register out = ToRegister(lir->output());
  11618 
  11619  // Explicit |byteLength| accesses are seq-consistent atomic loads.
  11620  auto sync = Synchronization::Load();
  11621 
  11622  masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, out);
  11623 }
  11624 
  11625 void CodeGenerator::visitGuardResizableArrayBufferViewInBounds(
  11626    LGuardResizableArrayBufferViewInBounds* lir) {
  11627  Register obj = ToRegister(lir->object());
  11628  Register temp = ToRegister(lir->temp0());
  11629 
  11630  Label bail;
  11631  masm.branchIfResizableArrayBufferViewOutOfBounds(obj, temp, &bail);
  11632  bailoutFrom(&bail, lir->snapshot());
  11633 }
  11634 
// Bail out unless the resizable-buffer view is either in bounds or its
// buffer has been detached. The in-bounds fast path jumps straight to
// |done|; otherwise a still-attached (but out-of-bounds) view bails out.
void CodeGenerator::visitGuardResizableArrayBufferViewInBoundsOrDetached(
    LGuardResizableArrayBufferViewInBoundsOrDetached* lir) {
  Register obj = ToRegister(lir->object());
  Register temp = ToRegister(lir->temp0());

  Label done, bail;
  masm.branchIfResizableArrayBufferViewInBounds(obj, temp, &done);
  masm.branchIfHasAttachedArrayBuffer(obj, temp, &bail);
  masm.bind(&done);
  bailoutFrom(&bail, lir->snapshot());
}
  11646 
  11647 void CodeGenerator::visitGuardHasAttachedArrayBuffer(
  11648    LGuardHasAttachedArrayBuffer* lir) {
  11649  Register obj = ToRegister(lir->object());
  11650  Register temp = ToRegister(lir->temp0());
  11651 
  11652  Label bail;
  11653  masm.branchIfHasDetachedArrayBuffer(obj, temp, &bail);
  11654  bailoutFrom(&bail, lir->snapshot());
  11655 }
  11656 
// Guard that a typed-array set's |offset| and |source.length| fit within the
// target: bail unless |offset <= target.length| and
// |source.length <= target.length - offset|.
void CodeGenerator::visitGuardTypedArraySetOffset(
    LGuardTypedArraySetOffset* lir) {
  Register offset = ToRegister(lir->offset());
  Register targetLength = ToRegister(lir->targetLength());
  Register sourceLength = ToRegister(lir->sourceLength());
  Register temp = ToRegister(lir->temp0());

  Label bail;

  // Ensure `offset <= target.length`.
  // temp = target.length - offset; a signed (negative) result means the
  // offset exceeds the length.
  masm.movePtr(targetLength, temp);
  masm.branchSubPtr(Assembler::Signed, offset, temp, &bail);

  // Ensure `source.length <= (target.length - offset)`.
  masm.branchPtr(Assembler::GreaterThan, sourceLength, temp, &bail);

  bailoutFrom(&bail, lir->snapshot());
}
  11675 
// %TypedArray%.prototype.fill for non-BigInt element types, implemented as a
// direct ABI call. The fill value's ABI type and the callee are selected by
// the element type; Float16 values travel as Float32, and every integer type
// up to 32 bits travels as int32.
void CodeGenerator::visitTypedArrayFill(LTypedArrayFill* lir) {
  auto elementType = lir->mir()->elementType();
  MOZ_ASSERT(!Scalar::isBigIntType(elementType));

  masm.setupAlignedABICall();
  masm.passABIArg(ToRegister(lir->object()));
  if (elementType == Scalar::Float64) {
    masm.passABIArg(ToFloatRegister(lir->value()), ABIType::Float64);
  } else if (elementType == Scalar::Float32 || elementType == Scalar::Float16) {
    // Float16 is widened and passed as a 32-bit float.
    masm.passABIArg(ToFloatRegister(lir->value()), ABIType::Float32);
  } else {
    MOZ_ASSERT(!Scalar::isFloatingType(elementType));
    masm.passABIArg(ToRegister(lir->value()));
  }
  masm.passABIArg(ToRegister(lir->start()));
  masm.passABIArg(ToRegister(lir->end()));

  // The callee must agree with the ABI type chosen for the value above.
  if (elementType == Scalar::Float64) {
    using Fn = void (*)(TypedArrayObject*, double, intptr_t, intptr_t);
    masm.callWithABI<Fn, js::TypedArrayFillDouble>();
  } else if (elementType == Scalar::Float32 || elementType == Scalar::Float16) {
    using Fn = void (*)(TypedArrayObject*, float, intptr_t, intptr_t);
    masm.callWithABI<Fn, js::TypedArrayFillFloat32>();
  } else {
    // All other types are managed using int32.
    MOZ_ASSERT(Scalar::byteSize(elementType) <= sizeof(int32_t));

    using Fn = void (*)(TypedArrayObject*, int32_t, intptr_t, intptr_t);
    masm.callWithABI<Fn, js::TypedArrayFillInt32>();
  }
}
  11707 
  11708 void CodeGenerator::visitTypedArrayFill64(LTypedArrayFill64* lir) {
  11709  MOZ_ASSERT(Scalar::isBigIntType(lir->mir()->elementType()));
  11710 
  11711  masm.setupAlignedABICall();
  11712  masm.passABIArg(ToRegister(lir->object()));
  11713  masm.passABIArg(ToRegister64(lir->value()));
  11714  masm.passABIArg(ToRegister(lir->start()));
  11715  masm.passABIArg(ToRegister(lir->end()));
  11716 
  11717  using Fn = void (*)(TypedArrayObject*, int64_t, intptr_t, intptr_t);
  11718  masm.callWithABI<Fn, js::TypedArrayFillInt64>();
  11719 }
  11720 
  11721 void CodeGenerator::visitTypedArraySet(LTypedArraySet* lir) {
  11722  Register target = ToRegister(lir->target());
  11723  Register source = ToRegister(lir->source());
  11724  Register offset = ToRegister(lir->offset());
  11725 
  11726  // Bit-wise copying is infallible because it doesn't need to allocate any
  11727  // temporary memory, even if the underlying buffers are the same.
  11728  if (lir->mir()->canUseBitwiseCopy()) {
  11729    masm.setupAlignedABICall();
  11730    masm.passABIArg(target);
  11731    masm.passABIArg(source);
  11732    masm.passABIArg(offset);
  11733 
  11734    using Fn = void (*)(TypedArrayObject*, TypedArrayObject*, intptr_t);
  11735    masm.callWithABI<Fn, js::TypedArraySetInfallible>();
  11736  } else {
  11737    pushArg(offset);
  11738    pushArg(source);
  11739    pushArg(target);
  11740 
  11741    using Fn =
  11742        bool (*)(JSContext*, TypedArrayObject*, TypedArrayObject*, intptr_t);
  11743    callVM<Fn, js::TypedArraySet>(lir);
  11744  }
  11745 }
  11746 
  11747 void CodeGenerator::visitTypedArraySetFromSubarray(
  11748    LTypedArraySetFromSubarray* lir) {
  11749  Register target = ToRegister(lir->target());
  11750  Register source = ToRegister(lir->source());
  11751  Register offset = ToRegister(lir->offset());
  11752  Register sourceOffset = ToRegister(lir->sourceOffset());
  11753  Register sourceLength = ToRegister(lir->sourceLength());
  11754 
  11755  // Bit-wise copying is infallible because it doesn't need to allocate any
  11756  // temporary memory, even if the underlying buffers are the same.
  11757  if (lir->mir()->canUseBitwiseCopy()) {
  11758    masm.setupAlignedABICall();
  11759    masm.passABIArg(target);
  11760    masm.passABIArg(source);
  11761    masm.passABIArg(offset);
  11762    masm.passABIArg(sourceOffset);
  11763    masm.passABIArg(sourceLength);
  11764 
  11765    using Fn = void (*)(TypedArrayObject*, TypedArrayObject*, intptr_t,
  11766                        intptr_t, intptr_t);
  11767    masm.callWithABI<Fn, js::TypedArraySetFromSubarrayInfallible>();
  11768  } else {
  11769    pushArg(sourceLength);
  11770    pushArg(sourceOffset);
  11771    pushArg(offset);
  11772    pushArg(source);
  11773    pushArg(target);
  11774 
  11775    using Fn = bool (*)(JSContext*, TypedArrayObject*, TypedArrayObject*,
  11776                        intptr_t, intptr_t, intptr_t);
  11777    callVM<Fn, js::TypedArraySetFromSubarray>(lir);
  11778  }
  11779 }
  11780 
  11781 void CodeGenerator::visitTypedArraySubarray(LTypedArraySubarray* lir) {
  11782  pushArg(ToRegister(lir->length()));
  11783  pushArg(ToRegister(lir->start()));
  11784  pushArg(ToRegister(lir->object()));
  11785 
  11786  using Fn = TypedArrayObject* (*)(JSContext*, Handle<TypedArrayObject*>,
  11787                                   intptr_t, intptr_t);
  11788  callVM<Fn, js::TypedArraySubarrayWithLength>(lir);
  11789 }
  11790 
// Convert a relative index to an absolute index clamped to [0, length]:
// a negative index is offset by |length| and clamped below at 0; a
// non-negative index is clamped above at |length|.
void CodeGenerator::visitToIntegerIndex(LToIntegerIndex* lir) {
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());

  masm.movePtr(index, output);

  Label done, notNegative;
  masm.branchTestPtr(Assembler::NotSigned, index, index, &notNegative);
  {
    // Negative index: output += length. If the sum is non-negative we're
    // done; otherwise clamp to 0.
    masm.branchAddPtr(Assembler::NotSigned, length, output, &done);
    masm.movePtr(ImmWord(0), output);
    masm.jump(&done);
  }
  masm.bind(&notNegative);
  {
    // Non-negative index: output = min(index, length).
    masm.cmpPtrMovePtr(Assembler::GreaterThan, index, length, length, output);
  }
  masm.bind(&done);
}
  11811 
// Convert a double to an intptr index. Without OOB support any inexact or
// out-of-range input bails out; with OOB support the conversion failure path
// substitutes -1, an index that is always out of bounds.
void CodeGenerator::visitGuardNumberToIntPtrIndex(
    LGuardNumberToIntPtrIndex* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());

  if (!lir->mir()->supportOOB()) {
    // Strict mode: failed conversion means deoptimize.
    Label bail;
    masm.convertDoubleToPtr(input, output, &bail, false);
    bailoutFrom(&bail, lir->snapshot());
    return;
  }

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    // Substitute the invalid index with an arbitrary out-of-bounds index.
    masm.movePtr(ImmWord(-1), output);
    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  masm.convertDoubleToPtr(input, output, ool->entry(), false);
  masm.bind(ool->rejoin());
}
  11834 
  11835 void CodeGenerator::visitStringLength(LStringLength* lir) {
  11836  Register input = ToRegister(lir->string());
  11837  Register output = ToRegister(lir->output());
  11838 
  11839  masm.loadStringLength(input, output);
  11840 }
  11841 
  11842 void CodeGenerator::visitMinMaxI(LMinMaxI* ins) {
  11843  Register first = ToRegister(ins->first());
  11844  Register output = ToRegister(ins->output());
  11845 
  11846  MOZ_ASSERT(first == output);
  11847 
  11848  if (ins->second()->isConstant()) {
  11849    auto second = Imm32(ToInt32(ins->second()));
  11850 
  11851    if (ins->mir()->isMax()) {
  11852      masm.max32(first, second, output);
  11853    } else {
  11854      masm.min32(first, second, output);
  11855    }
  11856  } else {
  11857    Register second = ToRegister(ins->second());
  11858 
  11859    if (ins->mir()->isMax()) {
  11860      masm.max32(first, second, output);
  11861    } else {
  11862      masm.min32(first, second, output);
  11863    }
  11864  }
  11865 }
  11866 
  11867 void CodeGenerator::visitMinMaxIntPtr(LMinMaxIntPtr* ins) {
  11868  Register first = ToRegister(ins->first());
  11869  Register output = ToRegister(ins->output());
  11870 
  11871  MOZ_ASSERT(first == output);
  11872 
  11873  if (ins->second()->isConstant()) {
  11874    auto second = ImmWord(ToIntPtr(ins->second()));
  11875 
  11876    if (ins->mir()->isMax()) {
  11877      masm.maxPtr(first, second, output);
  11878    } else {
  11879      masm.minPtr(first, second, output);
  11880    }
  11881  } else {
  11882    Register second = ToRegister(ins->second());
  11883 
  11884    if (ins->mir()->isMax()) {
  11885      masm.maxPtr(first, second, output);
  11886    } else {
  11887      masm.minPtr(first, second, output);
  11888    }
  11889  }
  11890 }
  11891 
  11892 void CodeGenerator::visitMinMaxArrayI(LMinMaxArrayI* ins) {
  11893  Register array = ToRegister(ins->array());
  11894  Register output = ToRegister(ins->output());
  11895  Register temp1 = ToRegister(ins->temp0());
  11896  Register temp2 = ToRegister(ins->temp1());
  11897  Register temp3 = ToRegister(ins->temp2());
  11898  bool isMax = ins->mir()->isMax();
  11899 
  11900  Label bail;
  11901  masm.minMaxArrayInt32(array, output, temp1, temp2, temp3, isMax, &bail);
  11902  bailoutFrom(&bail, ins->snapshot());
  11903 }
  11904 
  11905 void CodeGenerator::visitMinMaxArrayD(LMinMaxArrayD* ins) {
  11906  Register array = ToRegister(ins->array());
  11907  FloatRegister output = ToFloatRegister(ins->output());
  11908  FloatRegister floatTemp = ToFloatRegister(ins->temp0());
  11909  Register temp1 = ToRegister(ins->temp1());
  11910  Register temp2 = ToRegister(ins->temp2());
  11911  bool isMax = ins->mir()->isMax();
  11912 
  11913  Label bail;
  11914  masm.minMaxArrayNumber(array, output, floatTemp, temp1, temp2, isMax, &bail);
  11915  bailoutFrom(&bail, ins->snapshot());
  11916 }
  11917 
  11918 // For Abs*, lowering will have tied input to output on platforms where that is
  11919 // sensible, and otherwise left them untied.
  11920 
// |Math.abs| for int32. When the MIR node is fallible, negating INT32_MIN
// overflows int32 range, so that case bails out; otherwise the plain abs32
// helper is used.
void CodeGenerator::visitAbsI(LAbsI* ins) {
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  if (ins->mir()->fallible()) {
    Label positive;
    if (input != output) {
      masm.move32(input, output);
    }
    // Non-negative values are already their own absolute value.
    masm.branchTest32(Assembler::NotSigned, output, output, &positive);
    // Negate; overflow here means the input was INT32_MIN.
    Label bail;
    masm.branchNeg32(Assembler::Overflow, output, &bail);
    bailoutFrom(&bail, ins->snapshot());
    masm.bind(&positive);
  } else {
    masm.abs32(input, output);
  }
}
  11939 
  11940 void CodeGenerator::visitAbsD(LAbsD* ins) {
  11941  masm.absDouble(ToFloatRegister(ins->input()), ToFloatRegister(ins->output()));
  11942 }
  11943 
  11944 void CodeGenerator::visitAbsF(LAbsF* ins) {
  11945  masm.absFloat32(ToFloatRegister(ins->input()),
  11946                  ToFloatRegister(ins->output()));
  11947 }
  11948 
  11949 void CodeGenerator::visitPowII(LPowII* ins) {
  11950  Register value = ToRegister(ins->value());
  11951  Register power = ToRegister(ins->power());
  11952  Register output = ToRegister(ins->output());
  11953  Register temp0 = ToRegister(ins->temp0());
  11954  Register temp1 = ToRegister(ins->temp1());
  11955 
  11956  Label bailout;
  11957  masm.pow32(value, power, output, temp0, temp1, &bailout);
  11958  bailoutFrom(&bailout, ins->snapshot());
  11959 }
  11960 
  11961 void CodeGenerator::visitPowI(LPowI* ins) {
  11962  FloatRegister value = ToFloatRegister(ins->value());
  11963  Register power = ToRegister(ins->power());
  11964 
  11965  using Fn = double (*)(double x, int32_t y);
  11966  masm.setupAlignedABICall();
  11967  masm.passABIArg(value, ABIType::Float64);
  11968  masm.passABIArg(power);
  11969 
  11970  masm.callWithABI<Fn, js::powi>(ABIType::Float64);
  11971  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
  11972 }
  11973 
  11974 void CodeGenerator::visitPowD(LPowD* ins) {
  11975  FloatRegister value = ToFloatRegister(ins->value());
  11976  FloatRegister power = ToFloatRegister(ins->power());
  11977 
  11978  using Fn = double (*)(double x, double y);
  11979  masm.setupAlignedABICall();
  11980  masm.passABIArg(value, ABIType::Float64);
  11981  masm.passABIArg(power, ABIType::Float64);
  11982  masm.callWithABI<Fn, ecmaPow>(ABIType::Float64);
  11983 
  11984  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
  11985 }
  11986 
// base ** power where |base| is a known power of two (2^n, n > 0): computed
// as 1 << (n * power) via repeated shifts, with a bailout for exponents that
// would overflow int32 (or are negative, via the unsigned compare).
void CodeGenerator::visitPowOfTwoI(LPowOfTwoI* ins) {
  Register power = ToRegister(ins->power());
  Register output = ToRegister(ins->output());

  uint32_t base = ins->base();
  MOZ_ASSERT(mozilla::IsPowerOfTwo(base));

  uint32_t n = mozilla::FloorLog2(base);
  MOZ_ASSERT(n != 0);

  // Hacker's Delight, 2nd edition, theorem D2.
  auto ceilingDiv = [](uint32_t x, uint32_t y) { return (x + y - 1) / y; };

  // Take bailout if |power| is greater-or-equals |log_y(2^31)| or is negative.
  // |2^(n*y) < 2^31| must hold, hence |n*y < 31| resp. |y < 31/n|.
  //
  // Note: it's important for this condition to match the code in CacheIR.cpp
  // (CanAttachInt32Pow) to prevent failure loops.
  bailoutCmp32(Assembler::AboveOrEqual, power, Imm32(ceilingDiv(31, n)),
               ins->snapshot());

  // Compute (2^n)^y as 2^(n*y) using repeated shifts. We could directly scale
  // |power| and perform a single shift, but due to the lack of necessary
  // MacroAssembler functionality, like multiplying a register with an
  // immediate, we restrict the number of generated shift instructions when
  // lowering this operation.
  masm.move32(Imm32(1), output);
  do {
    masm.lshift32(power, output);
    n--;
  } while (n > 0);
}
  12019 
  12020 void CodeGenerator::visitSqrtD(LSqrtD* ins) {
  12021  FloatRegister input = ToFloatRegister(ins->input());
  12022  FloatRegister output = ToFloatRegister(ins->output());
  12023  masm.sqrtDouble(input, output);
  12024 }
  12025 
  12026 void CodeGenerator::visitSqrtF(LSqrtF* ins) {
  12027  FloatRegister input = ToFloatRegister(ins->input());
  12028  FloatRegister output = ToFloatRegister(ins->output());
  12029  masm.sqrtFloat32(input, output);
  12030 }
  12031 
  12032 void CodeGenerator::visitSignI(LSignI* ins) {
  12033  Register input = ToRegister(ins->input());
  12034  Register output = ToRegister(ins->output());
  12035  masm.signInt32(input, output);
  12036 }
  12037 
  12038 void CodeGenerator::visitSignD(LSignD* ins) {
  12039  FloatRegister input = ToFloatRegister(ins->input());
  12040  FloatRegister output = ToFloatRegister(ins->output());
  12041  masm.signDouble(input, output);
  12042 }
  12043 
  12044 void CodeGenerator::visitSignDI(LSignDI* ins) {
  12045  FloatRegister input = ToFloatRegister(ins->input());
  12046  FloatRegister temp = ToFloatRegister(ins->temp0());
  12047  Register output = ToRegister(ins->output());
  12048 
  12049  Label bail;
  12050  masm.signDoubleToInt32(input, output, temp, &bail);
  12051  bailoutFrom(&bail, ins->snapshot());
  12052 }
  12053 
  12054 void CodeGenerator::visitSignID(LSignID* ins) {
  12055  Register input = ToRegister(ins->input());
  12056  Register temp = ToRegister(ins->temp0());
  12057  FloatRegister output = ToFloatRegister(ins->output());
  12058 
  12059  masm.signInt32(input, temp);
  12060  masm.convertInt32ToDouble(temp, output);
  12061 }
  12062 
  12063 void CodeGenerator::visitMathFunctionD(LMathFunctionD* ins) {
  12064  FloatRegister input = ToFloatRegister(ins->input());
  12065  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
  12066 
  12067  UnaryMathFunction fun = ins->mir()->function();
  12068  UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);
  12069 
  12070  masm.setupAlignedABICall();
  12071 
  12072  masm.passABIArg(input, ABIType::Float64);
  12073  masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
  12074                   ABIType::Float64);
  12075 }
  12076 
// Call a unary float32 math function. Only a small set of functions is
// supported; plain libm functions (floor/trunc/ceil) skip the
// callWithABI safety check that engine-internal callees get.
void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg);

  masm.setupAlignedABICall();
  masm.passABIArg(input, ABIType::Float32);

  using Fn = float (*)(float x);
  Fn funptr = nullptr;
  CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check;
  switch (ins->mir()->function()) {
    case UnaryMathFunction::Floor:
      funptr = std::floor;
      check = CheckUnsafeCallWithABI::DontCheckOther;
      break;
    case UnaryMathFunction::Round:
      funptr = math_roundf_impl;
      break;
    case UnaryMathFunction::Trunc:
      funptr = std::trunc;
      check = CheckUnsafeCallWithABI::DontCheckOther;
      break;
    case UnaryMathFunction::Ceil:
      funptr = std::ceil;
      check = CheckUnsafeCallWithABI::DontCheckOther;
      break;
    default:
      MOZ_CRASH("Unknown or unsupported float32 math function");
  }

  masm.callWithABI(DynamicFunction<Fn>(funptr), ABIType::Float32, check);
}
  12109 
  12110 void CodeGenerator::visitModD(LModD* ins) {
  12111  MOZ_ASSERT(!gen->compilingWasm());
  12112 
  12113  FloatRegister lhs = ToFloatRegister(ins->lhs());
  12114  FloatRegister rhs = ToFloatRegister(ins->rhs());
  12115 
  12116  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
  12117 
  12118  using Fn = double (*)(double a, double b);
  12119  masm.setupAlignedABICall();
  12120  masm.passABIArg(lhs, ABIType::Float64);
  12121  masm.passABIArg(rhs, ABIType::Float64);
  12122  masm.callWithABI<Fn, NumberMod>(ABIType::Float64);
  12123 }
  12124 
// Double modulo with a constant power-of-two divisor, computed inline
// without calling fmod. The precision argument for why this needs a
// power-of-two divisor, and the subnormal fast path, are explained below.
void CodeGenerator::visitModPowTwoD(LModPowTwoD* ins) {
  FloatRegister lhs = ToFloatRegister(ins->lhs());
  uint32_t divisor = ins->divisor();
  MOZ_ASSERT(mozilla::IsPowerOfTwo(divisor));

  FloatRegister output = ToFloatRegister(ins->output());

  // Compute |n % d| using |copysign(n - (d * trunc(n / d)), n)|.
  //
  // This doesn't work if |d| isn't a power of two, because we may lose too much
  // precision. For example |Number.MAX_VALUE % 3 == 2|, but
  // |3 * trunc(Number.MAX_VALUE / 3) == Infinity|.

  Label done;
  {
    ScratchDoubleScope scratch(masm);

    // Subnormals can lead to performance degradation, which can make calling
    // |fmod| faster than this inline implementation. Work around this issue by
    // directly returning the input for any value in the interval ]-1, +1[.
    Label notSubnormal;
    masm.loadConstantDouble(1.0, scratch);
    masm.loadConstantDouble(-1.0, output);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, lhs, scratch,
                      &notSubnormal);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, lhs, output,
                      &notSubnormal);

    // |lhs| is in ]-1, +1[ (or NaN): |lhs % d| is |lhs| itself.
    masm.moveDouble(lhs, output);
    masm.jump(&done);

    masm.bind(&notSubnormal);

    if (divisor == 1) {
      // The pattern |n % 1 == 0| is used to detect integer numbers. We can skip
      // the multiplication by one in this case.
      masm.moveDouble(lhs, output);
      masm.nearbyIntDouble(RoundingMode::TowardsZero, output, scratch);
      masm.subDouble(scratch, output);
    } else {
      // scratch = trunc(lhs / d) * d; output = lhs - scratch.
      masm.loadConstantDouble(1.0 / double(divisor), scratch);
      masm.loadConstantDouble(double(divisor), output);

      masm.mulDouble(lhs, scratch);
      masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
      masm.mulDouble(output, scratch);

      masm.moveDouble(lhs, output);
      masm.subDouble(scratch, output);
    }
  }

  // Transfer the sign of |lhs| so that e.g. |-0 % d| stays -0.
  masm.copySignDouble(output, lhs, output);
  masm.bind(&done);
}
  12180 
// Wasm double modulo via the ModD builtin. InstanceReg is saved around the
// call and its stack slot's offset is passed to callWithABI.
void CodeGenerator::visitWasmBuiltinModD(LWasmBuiltinModD* ins) {
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  FloatRegister lhs = ToFloatRegister(ins->lhs());
  FloatRegister rhs = ToFloatRegister(ins->rhs());

  MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

  masm.setupWasmABICall(wasm::SymbolicAddress::ModD);
  masm.passABIArg(lhs, ABIType::Float64);
  masm.passABIArg(rhs, ABIType::Float64);

  // Locate the saved instance relative to the current frame depth.
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD,
                   mozilla::Some(instanceOffset), ABIType::Float64);

  masm.Pop(InstanceReg);
}
  12200 
  12201 void CodeGenerator::visitClzI(LClzI* ins) {
  12202  Register input = ToRegister(ins->input());
  12203  Register output = ToRegister(ins->output());
  12204  bool knownNotZero = ins->mir()->operandIsNeverZero();
  12205 
  12206  masm.clz32(input, output, knownNotZero);
  12207 }
  12208 
  12209 void CodeGenerator::visitCtzI(LCtzI* ins) {
  12210  Register input = ToRegister(ins->input());
  12211  Register output = ToRegister(ins->output());
  12212  bool knownNotZero = ins->mir()->operandIsNeverZero();
  12213 
  12214  masm.ctz32(input, output, knownNotZero);
  12215 }
  12216 
  12217 void CodeGenerator::visitPopcntI(LPopcntI* ins) {
  12218  Register input = ToRegister(ins->input());
  12219  Register output = ToRegister(ins->output());
  12220  Register temp = ToRegister(ins->temp0());
  12221 
  12222  masm.popcnt32(input, output, temp);
  12223 }
  12224 
  12225 void CodeGenerator::visitClzI64(LClzI64* ins) {
  12226  Register64 input = ToRegister64(ins->input());
  12227  Register64 output = ToOutRegister64(ins);
  12228 
  12229  masm.clz64(input, output);
  12230 }
  12231 
  12232 void CodeGenerator::visitCtzI64(LCtzI64* ins) {
  12233  Register64 input = ToRegister64(ins->input());
  12234  Register64 output = ToOutRegister64(ins);
  12235 
  12236  masm.ctz64(input, output);
  12237 }
  12238 
  12239 void CodeGenerator::visitPopcntI64(LPopcntI64* ins) {
  12240  Register64 input = ToRegister64(ins->input());
  12241  Register64 output = ToOutRegister64(ins);
  12242  Register temp = ToRegister(ins->temp0());
  12243 
  12244  masm.popcnt64(input, output, temp);
  12245 }
  12246 
  12247 void CodeGenerator::visitBigIntAdd(LBigIntAdd* ins) {
  12248  pushArg(ToRegister(ins->rhs()));
  12249  pushArg(ToRegister(ins->lhs()));
  12250 
  12251  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12252  callVM<Fn, BigInt::add>(ins);
  12253 }
  12254 
  12255 void CodeGenerator::visitBigIntSub(LBigIntSub* ins) {
  12256  pushArg(ToRegister(ins->rhs()));
  12257  pushArg(ToRegister(ins->lhs()));
  12258 
  12259  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12260  callVM<Fn, BigInt::sub>(ins);
  12261 }
  12262 
  12263 void CodeGenerator::visitBigIntMul(LBigIntMul* ins) {
  12264  pushArg(ToRegister(ins->rhs()));
  12265  pushArg(ToRegister(ins->lhs()));
  12266 
  12267  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12268  callVM<Fn, BigInt::mul>(ins);
  12269 }
  12270 
  12271 void CodeGenerator::visitBigIntDiv(LBigIntDiv* ins) {
  12272  pushArg(ToRegister(ins->rhs()));
  12273  pushArg(ToRegister(ins->lhs()));
  12274 
  12275  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12276  callVM<Fn, BigInt::div>(ins);
  12277 }
  12278 
  12279 void CodeGenerator::visitBigIntMod(LBigIntMod* ins) {
  12280  pushArg(ToRegister(ins->rhs()));
  12281  pushArg(ToRegister(ins->lhs()));
  12282 
  12283  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12284  callVM<Fn, BigInt::mod>(ins);
  12285 }
  12286 
  12287 void CodeGenerator::visitBigIntPow(LBigIntPow* ins) {
  12288  pushArg(ToRegister(ins->rhs()));
  12289  pushArg(ToRegister(ins->lhs()));
  12290 
  12291  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12292  callVM<Fn, BigInt::pow>(ins);
  12293 }
  12294 
  12295 void CodeGenerator::visitBigIntBitAnd(LBigIntBitAnd* ins) {
  12296  pushArg(ToRegister(ins->rhs()));
  12297  pushArg(ToRegister(ins->lhs()));
  12298 
  12299  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12300  callVM<Fn, BigInt::bitAnd>(ins);
  12301 }
  12302 
  12303 void CodeGenerator::visitBigIntBitOr(LBigIntBitOr* ins) {
  12304  pushArg(ToRegister(ins->rhs()));
  12305  pushArg(ToRegister(ins->lhs()));
  12306 
  12307  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12308  callVM<Fn, BigInt::bitOr>(ins);
  12309 }
  12310 
  12311 void CodeGenerator::visitBigIntBitXor(LBigIntBitXor* ins) {
  12312  pushArg(ToRegister(ins->rhs()));
  12313  pushArg(ToRegister(ins->lhs()));
  12314 
  12315  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12316  callVM<Fn, BigInt::bitXor>(ins);
  12317 }
  12318 
  12319 void CodeGenerator::visitBigIntLsh(LBigIntLsh* ins) {
  12320  pushArg(ToRegister(ins->rhs()));
  12321  pushArg(ToRegister(ins->lhs()));
  12322 
  12323  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12324  callVM<Fn, BigInt::lsh>(ins);
  12325 }
  12326 
  12327 void CodeGenerator::visitBigIntRsh(LBigIntRsh* ins) {
  12328  pushArg(ToRegister(ins->rhs()));
  12329  pushArg(ToRegister(ins->lhs()));
  12330 
  12331  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  12332  callVM<Fn, BigInt::rsh>(ins);
  12333 }
  12334 
  12335 void CodeGenerator::visitBigIntIncrement(LBigIntIncrement* ins) {
  12336  pushArg(ToRegister(ins->input()));
  12337 
  12338  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  12339  callVM<Fn, BigInt::inc>(ins);
  12340 }
  12341 
  12342 void CodeGenerator::visitBigIntDecrement(LBigIntDecrement* ins) {
  12343  pushArg(ToRegister(ins->input()));
  12344 
  12345  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  12346  callVM<Fn, BigInt::dec>(ins);
  12347 }
  12348 
void CodeGenerator::visitBigIntNegate(LBigIntNegate* ins) {
  // Unary minus on a BigInt. Inline path: clone the BigInt into a freshly
  // allocated cell with inline digits and flip the clone's sign bit. Zero is
  // forwarded unchanged, and inputs whose digits live on the heap take the
  // out-of-line VM call.
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp0());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  auto* ool =
      oolCallVM<Fn, BigInt::neg>(ins, ArgList(input), StoreRegisterTo(output));

  // -0n == 0n, so a zero input is its own negation and needs no allocation.
  Label lhsNonZero;
  masm.branchIfBigIntIsNonZero(input, &lhsNonZero);
  masm.movePtr(input, output);
  masm.jump(ool->rejoin());
  masm.bind(&lhsNonZero);

  // Call into the VM when the input uses heap digits.
  masm.copyBigIntWithInlineDigits(input, output, temp, initialBigIntHeap(),
                                  ool->entry());

  // Flip the sign bit.
  masm.xor32(Imm32(BigInt::signBitMask()),
             Address(output, BigInt::offsetOfFlags()));

  masm.bind(ool->rejoin());
}
  12375 
  12376 void CodeGenerator::visitBigIntBitNot(LBigIntBitNot* ins) {
  12377  pushArg(ToRegister(ins->input()));
  12378 
  12379  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  12380  callVM<Fn, BigInt::bitNot>(ins);
  12381 }
  12382 
  12383 void CodeGenerator::visitBigIntToIntPtr(LBigIntToIntPtr* ins) {
  12384  Register input = ToRegister(ins->input());
  12385  Register output = ToRegister(ins->output());
  12386 
  12387  Label bail;
  12388  masm.loadBigIntPtr(input, output, &bail);
  12389  bailoutFrom(&bail, ins->snapshot());
  12390 }
  12391 
void CodeGenerator::visitIntPtrToBigInt(LIntPtrToBigInt* ins) {
  // Box a pointer-sized integer as a BigInt. Inline path: allocate a new
  // BigInt cell and initialize it from |input|; if the allocation fails we
  // take the out-of-line VM call instead.
  Register input = ToRegister(ins->input());
  Register temp = ToRegister(ins->temp0());
  Register output = ToRegister(ins->output());

  using Fn = BigInt* (*)(JSContext*, intptr_t);
  auto* ool = oolCallVM<Fn, JS::BigInt::createFromIntPtr>(
      ins, ArgList(input), StoreRegisterTo(output));

  // Allocate the result cell; jumps to the OOL entry on failure.
  masm.newGCBigInt(output, temp, initialBigIntHeap(), ool->entry());
  // Work on a copy of |input| — presumably initializeBigIntPtr mutates its
  // value operand (NOTE(review): confirm against MacroAssembler).
  masm.movePtr(input, temp);
  masm.initializeBigIntPtr(output, temp);

  masm.bind(ool->rejoin());
}
  12407 
  12408 void CodeGenerator::visitBigIntPtrAdd(LBigIntPtrAdd* ins) {
  12409  Register lhs = ToRegister(ins->lhs());
  12410  const LAllocation* rhs = ins->rhs();
  12411  Register output = ToRegister(ins->output());
  12412 
  12413  if (rhs->isConstant()) {
  12414    masm.movePtr(ImmWord(ToIntPtr(rhs)), output);
  12415  } else {
  12416    masm.movePtr(ToRegister(rhs), output);
  12417  }
  12418 
  12419  Label bail;
  12420  masm.branchAddPtr(Assembler::Overflow, lhs, output, &bail);
  12421  bailoutFrom(&bail, ins->snapshot());
  12422 }
  12423 
  12424 void CodeGenerator::visitBigIntPtrSub(LBigIntPtrSub* ins) {
  12425  Register lhs = ToRegister(ins->lhs());
  12426  Register rhs = ToRegister(ins->rhs());
  12427  Register output = ToRegister(ins->output());
  12428 
  12429  Label bail;
  12430  masm.movePtr(lhs, output);
  12431  masm.branchSubPtr(Assembler::Overflow, rhs, output, &bail);
  12432  bailoutFrom(&bail, ins->snapshot());
  12433 }
  12434 
  12435 void CodeGenerator::visitBigIntPtrMul(LBigIntPtrMul* ins) {
  12436  Register lhs = ToRegister(ins->lhs());
  12437  const LAllocation* rhs = ins->rhs();
  12438  Register output = ToRegister(ins->output());
  12439 
  12440  if (rhs->isConstant()) {
  12441    masm.movePtr(ImmWord(ToIntPtr(rhs)), output);
  12442  } else {
  12443    masm.movePtr(ToRegister(rhs), output);
  12444  }
  12445 
  12446  Label bail;
  12447  masm.branchMulPtr(Assembler::Overflow, lhs, output, &bail);
  12448  bailoutFrom(&bail, ins->snapshot());
  12449 }
  12450 
void CodeGenerator::visitBigIntPtrDiv(LBigIntPtrDiv* ins) {
  // Signed division of two pointer-sized BigInt values. Bails out for
  // division by zero and for the single overflowing case DigitMin / -1.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());

  // x / 0 throws an error.
  Label bail;
  if (ins->mir()->canBeDivideByZero()) {
    masm.branchPtr(Assembler::Equal, rhs, Imm32(0), &bail);
  }

  // Smallest representable signed value for a pointer-sized digit.
  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::Equal, rhs, Imm32(-1), &bail);
  masm.bind(&notOverflow);

  emitBigIntPtrDiv(ins, lhs, rhs, output);

  bailoutFrom(&bail, ins->snapshot());
}
  12475 
void CodeGenerator::visitBigIntPtrDivPowTwo(LBigIntPtrDivPowTwo* ins) {
  // Signed division of a pointer-sized BigInt by a known power of two
  // (optionally negated), implemented with arithmetic shifts instead of a
  // divide instruction.
  Register lhs = ToRegister(ins->lhs());
  Register output = ToRegister(ins->output());
  int32_t shift = ins->shift();
  bool negativeDivisor = ins->negativeDivisor();

  masm.movePtr(lhs, output);

  if (shift) {
    // Adjust the value so that shifting produces a correctly rounded result
    // when the numerator is negative.
    // See 10-1 "Signed Division by a Known Power of 2" in Henry S. Warren,
    // Jr.'s Hacker's Delight.

    constexpr size_t bits = BigInt::DigitBits;

    if (shift > 1) {
      // Copy the sign bit of the numerator. (= (2^bits - 1) or 0)
      masm.rshiftPtrArithmetic(Imm32(bits - 1), output);
    }

    // Divide by 2^(bits - shift)
    // i.e. (= (2^bits - 1) / 2^(bits - shift) or 0)
    // i.e. (= (2^shift - 1) or 0)
    masm.rshiftPtr(Imm32(bits - shift), output);

    // If signed, make any 1 bit below the shifted bits to bubble up, such that
    // once shifted the value would be rounded towards 0.
    masm.addPtr(lhs, output);

    masm.rshiftPtrArithmetic(Imm32(shift), output);

    if (negativeDivisor) {
      masm.negPtr(output);
    }
  } else if (negativeDivisor) {
    // Division by -1 is a plain negation; the only input that can overflow
    // is the smallest representable digit value, which triggers a bailout.
    Label bail;
    masm.branchNegPtr(Assembler::Overflow, output, &bail);
    bailoutFrom(&bail, ins->snapshot());
  }
}
  12517 
void CodeGenerator::visitBigIntPtrMod(LBigIntPtrMod* ins) {
  // Signed remainder of two pointer-sized BigInt values. x % 0 bails out;
  // the hardware-overflowing case DigitMin % -1 is handled inline by
  // replacing the dividend with 0 (the mathematical result is 0).
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());

  // x % 0 throws an error.
  if (ins->mir()->canBeDivideByZero()) {
    bailoutCmpPtr(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
  }

  // Smallest representable signed value for a pointer-sized digit.
  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Work on a copy of the dividend so it can be zeroed below.
  masm.movePtr(lhs, temp);

  // Handle an integer overflow from INT{32,64}_MIN / -1.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::NotEqual, rhs, Imm32(-1), &notOverflow);
  masm.movePtr(ImmWord(0), temp);
  masm.bind(&notOverflow);

  emitBigIntPtrMod(ins, temp, rhs, output);
}
  12543 
void CodeGenerator::visitBigIntPtrModPowTwo(LBigIntPtrModPowTwo* ins) {
  // Remainder by a known power of two, computed with a bitmask. Negative
  // dividends are negated around the mask so the result keeps the
  // dividend's sign (truncated-division semantics).
  Register lhs = ToRegister(ins->lhs());
  Register output = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());
  int32_t shift = ins->shift();

  masm.movePtr(lhs, output);
  // temp = 2^shift - 1, the mask selecting the low |shift| bits.
  masm.movePtr(ImmWord((uintptr_t(1) << shift) - uintptr_t(1)), temp);

  // Switch based on sign of the lhs.

  // Positive numbers are just a bitmask.
  Label negative;
  masm.branchTestPtr(Assembler::Signed, lhs, lhs, &negative);

  masm.andPtr(temp, output);

  Label done;
  masm.jump(&done);

  // Negative numbers need a negate, bitmask, negate
  masm.bind(&negative);

  masm.negPtr(output);
  masm.andPtr(temp, output);
  masm.negPtr(output);

  masm.bind(&done);
}
  12573 
  12574 void CodeGenerator::visitBigIntPtrPow(LBigIntPtrPow* ins) {
  12575  Register lhs = ToRegister(ins->lhs());
  12576  Register rhs = ToRegister(ins->rhs());
  12577  Register output = ToRegister(ins->output());
  12578  Register temp0 = ToRegister(ins->temp0());
  12579  Register temp1 = ToRegister(ins->temp1());
  12580 
  12581  Label bail;
  12582  masm.powPtr(lhs, rhs, output, temp0, temp1, &bail);
  12583  bailoutFrom(&bail, ins->snapshot());
  12584 }
  12585 
  12586 void CodeGenerator::visitBigIntPtrBitAnd(LBigIntPtrBitAnd* ins) {
  12587  Register lhs = ToRegister(ins->lhs());
  12588  const LAllocation* rhs = ins->rhs();
  12589  Register output = ToRegister(ins->output());
  12590 
  12591  if (rhs->isConstant()) {
  12592    masm.movePtr(ImmWord(ToIntPtr(rhs)), output);
  12593  } else {
  12594    masm.movePtr(ToRegister(rhs), output);
  12595  }
  12596  masm.andPtr(lhs, output);
  12597 }
  12598 
  12599 void CodeGenerator::visitBigIntPtrBitOr(LBigIntPtrBitOr* ins) {
  12600  Register lhs = ToRegister(ins->lhs());
  12601  const LAllocation* rhs = ins->rhs();
  12602  Register output = ToRegister(ins->output());
  12603 
  12604  if (rhs->isConstant()) {
  12605    masm.movePtr(ImmWord(ToIntPtr(rhs)), output);
  12606  } else {
  12607    masm.movePtr(ToRegister(rhs), output);
  12608  }
  12609  masm.orPtr(lhs, output);
  12610 }
  12611 
  12612 void CodeGenerator::visitBigIntPtrBitXor(LBigIntPtrBitXor* ins) {
  12613  Register lhs = ToRegister(ins->lhs());
  12614  const LAllocation* rhs = ins->rhs();
  12615  Register output = ToRegister(ins->output());
  12616 
  12617  if (rhs->isConstant()) {
  12618    masm.movePtr(ImmWord(ToIntPtr(rhs)), output);
  12619  } else {
  12620    masm.movePtr(ToRegister(rhs), output);
  12621  }
  12622  masm.xorPtr(lhs, output);
  12623 }
  12624 
void CodeGenerator::visitBigIntPtrLsh(LBigIntPtrLsh* ins) {
  // Left shift of a pointer-sized BigInt. A negative shift count performs an
  // arithmetic right shift instead. If the shifted value no longer fits in a
  // pointer-sized digit, we bail out.
  Register lhs = ToRegister(ins->lhs());
  Register output = ToRegister(ins->output());
  // Temps may be absent depending on lowering; ToTempRegisterOrInvalid
  // tolerates a missing allocation.
  Register temp = ToTempRegisterOrInvalid(ins->temp0());
  Register tempShift = ToTempRegisterOrInvalid(ins->temp1());

  if (ins->rhs()->isConstant()) {
    intptr_t rhs = ToIntPtr(ins->rhs());

    if (rhs >= intptr_t(BigInt::DigitBits)) {
      MOZ_ASSERT(ins->mir()->fallible());

      // x << DigitBits with x != 0n always exceeds pointer-sized storage.
      masm.movePtr(ImmWord(0), output);
      bailoutCmpPtr(Assembler::NotEqual, lhs, Imm32(0), ins->snapshot());
    } else if (rhs <= -intptr_t(BigInt::DigitBits)) {
      MOZ_ASSERT(!ins->mir()->fallible());

      // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
      masm.rshiftPtrArithmetic(Imm32(BigInt::DigitBits - 1), lhs, output);
    } else if (rhs <= 0) {
      MOZ_ASSERT(!ins->mir()->fallible());

      // |x << -y| is computed as |x >> y|.
      masm.rshiftPtrArithmetic(Imm32(-rhs), lhs, output);
    } else {
      MOZ_ASSERT(ins->mir()->fallible());

      masm.lshiftPtr(Imm32(rhs), lhs, output);

      // Check for overflow: ((lhs << rhs) >> rhs) == lhs.
      masm.rshiftPtrArithmetic(Imm32(rhs), output, temp);
      bailoutCmpPtr(Assembler::NotEqual, temp, lhs, ins->snapshot());
    }
  } else {
    Register rhs = ToRegister(ins->rhs());

    Label done, bail;
    MOZ_ASSERT(ins->mir()->fallible());

    masm.movePtr(lhs, output);

    // 0n << x == 0n
    masm.branchPtr(Assembler::Equal, lhs, Imm32(0), &done);

    // x << DigitBits with x != 0n always exceeds pointer-sized storage.
    masm.branchPtr(Assembler::GreaterThanOrEqual, rhs, Imm32(BigInt::DigitBits),
                   &bail);

    // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
    Label shift;
    masm.branchPtr(Assembler::GreaterThan, rhs,
                   Imm32(-int32_t(BigInt::DigitBits)), &shift);
    {
      masm.rshiftPtrArithmetic(Imm32(BigInt::DigitBits - 1), output);
      masm.jump(&done);
    }
    masm.bind(&shift);

    // Move |rhs| into the designated shift register.
    masm.movePtr(rhs, tempShift);

    // |x << -y| is computed as |x >> y|.
    Label leftShift;
    masm.branchPtr(Assembler::GreaterThanOrEqual, rhs, Imm32(0), &leftShift);
    {
      masm.negPtr(tempShift);
      masm.rshiftPtrArithmetic(tempShift, output);
      masm.jump(&done);
    }
    masm.bind(&leftShift);

    masm.lshiftPtr(tempShift, output);

    // Check for overflow: ((lhs << rhs) >> rhs) == lhs.
    masm.movePtr(output, temp);
    masm.rshiftPtrArithmetic(tempShift, temp);
    masm.branchPtr(Assembler::NotEqual, temp, lhs, &bail);

    masm.bind(&done);
    bailoutFrom(&bail, ins->snapshot());
  }
}
  12708 
void CodeGenerator::visitBigIntPtrRsh(LBigIntPtrRsh* ins) {
  // Arithmetic right shift of a pointer-sized BigInt. A negative shift count
  // performs a left shift instead, whose result must still fit in a
  // pointer-sized digit, otherwise we bail out.
  Register lhs = ToRegister(ins->lhs());
  Register output = ToRegister(ins->output());
  // Temps may be absent depending on lowering; ToTempRegisterOrInvalid
  // tolerates a missing allocation.
  Register temp = ToTempRegisterOrInvalid(ins->temp0());
  Register tempShift = ToTempRegisterOrInvalid(ins->temp1());

  if (ins->rhs()->isConstant()) {
    intptr_t rhs = ToIntPtr(ins->rhs());

    if (rhs <= -intptr_t(BigInt::DigitBits)) {
      MOZ_ASSERT(ins->mir()->fallible());

      // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
      masm.movePtr(ImmWord(0), output);
      bailoutCmpPtr(Assembler::NotEqual, lhs, Imm32(0), ins->snapshot());
    } else if (rhs >= intptr_t(BigInt::DigitBits)) {
      MOZ_ASSERT(!ins->mir()->fallible());

      // x >> DigitBits is either 0n or -1n.
      masm.rshiftPtrArithmetic(Imm32(BigInt::DigitBits - 1), lhs, output);
    } else if (rhs < 0) {
      MOZ_ASSERT(ins->mir()->fallible());

      // |x >> -y| is computed as |x << y|.
      masm.lshiftPtr(Imm32(-rhs), lhs, output);

      // Check for overflow: ((lhs << rhs) >> rhs) == lhs.
      masm.rshiftPtrArithmetic(Imm32(-rhs), output, temp);
      bailoutCmpPtr(Assembler::NotEqual, temp, lhs, ins->snapshot());
    } else {
      MOZ_ASSERT(!ins->mir()->fallible());

      masm.rshiftPtrArithmetic(Imm32(rhs), lhs, output);
    }
  } else {
    Register rhs = ToRegister(ins->rhs());

    Label done, bail;
    MOZ_ASSERT(ins->mir()->fallible());

    masm.movePtr(lhs, output);

    // 0n >> x == 0n
    masm.branchPtr(Assembler::Equal, lhs, Imm32(0), &done);

    // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
    masm.branchPtr(Assembler::LessThanOrEqual, rhs,
                   Imm32(-int32_t(BigInt::DigitBits)), &bail);

    // x >> DigitBits is either 0n or -1n.
    Label shift;
    masm.branchPtr(Assembler::LessThan, rhs, Imm32(BigInt::DigitBits), &shift);
    {
      masm.rshiftPtrArithmetic(Imm32(BigInt::DigitBits - 1), output);
      masm.jump(&done);
    }
    masm.bind(&shift);

    // Move |rhs| into the designated shift register.
    masm.movePtr(rhs, tempShift);

    // |x >> -y| is computed as |x << y|.
    Label rightShift;
    masm.branchPtr(Assembler::GreaterThanOrEqual, rhs, Imm32(0), &rightShift);
    {
      masm.negPtr(tempShift);
      masm.lshiftPtr(tempShift, output);

      // Check for overflow: ((lhs << rhs) >> rhs) == lhs.
      masm.movePtr(output, temp);
      masm.rshiftPtrArithmetic(tempShift, temp);
      masm.branchPtr(Assembler::NotEqual, temp, lhs, &bail);

      masm.jump(&done);
    }
    masm.bind(&rightShift);

    masm.rshiftPtrArithmetic(tempShift, output);

    masm.bind(&done);
    bailoutFrom(&bail, ins->snapshot());
  }
}
  12792 
  12793 void CodeGenerator::visitBigIntPtrBitNot(LBigIntPtrBitNot* ins) {
  12794  Register input = ToRegister(ins->input());
  12795  Register output = ToRegister(ins->output());
  12796 
  12797  masm.movePtr(input, output);
  12798  masm.notPtr(output);
  12799 }
  12800 
void CodeGenerator::visitInt32ToStringWithBase(LInt32ToStringWithBase* lir) {
  // Convert an int32 to a string in the given base (Number#toString(base)).
  // The inline masm path consults the runtime's static strings table; other
  // cases fall back to the out-of-line VM call.
  Register input = ToRegister(lir->input());
  RegisterOrInt32 base = ToRegisterOrInt32(lir->base());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  // Whether the conversion should produce lowercase digits.
  bool lowerCase = lir->mir()->lowerCase();

  using Fn = JSLinearString* (*)(JSContext*, int32_t, int32_t, bool);
  if (base.is<Register>()) {
    // Base is only known at runtime; the inline path additionally receives
    // the live volatile registers.
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase<CanGC>>(
        lir, ArgList(input, base.as<Register>(), Imm32(lowerCase)),
        StoreRegisterTo(output));

    LiveRegisterSet liveRegs = liveVolatileRegs(lir);
    masm.loadInt32ToStringWithBase(input, base.as<Register>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   liveRegs, lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  } else {
    // Base is a compile-time constant.
    auto* ool = oolCallVM<Fn, js::Int32ToStringWithBase<CanGC>>(
        lir, ArgList(input, Imm32(base.as<int32_t>()), Imm32(lowerCase)),
        StoreRegisterTo(output));

    masm.loadInt32ToStringWithBase(input, base.as<int32_t>(), output, temp0,
                                   temp1, gen->runtime->staticStrings(),
                                   lowerCase, ool->entry());
    masm.bind(ool->rejoin());
  }
}
  12832 
void CodeGenerator::visitNumberParseInt(LNumberParseInt* lir) {
  // parseInt(string, radix) with radix known to be 0 or 10: if the string
  // has a cached index value, return it directly as an int32; otherwise
  // call into the VM.
  Register string = ToRegister(lir->string());
  Register radix = ToRegister(lir->radix());
  ValueOperand output = ToOutValue(lir);
  Register temp = ToRegister(lir->temp0());

#ifdef DEBUG
  // The indexed-value fast path below is only correct for radix 0 or 10.
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(string, temp, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, temp, output);
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    pushArg(radix);
    pushArg(string);

    using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
    callVM<Fn, js::NumberParseInt>(lir);
  }
  masm.bind(&done);
}
  12863 
void CodeGenerator::visitDoubleParseInt(LDoubleParseInt* lir) {
  // parseInt applied to a double: truncate toward zero to an int32. Bails
  // out for NaN, for doubles that don't truncate to an int32, and for
  // non-zero inputs in the exclusive range (-1, 1.0e-6).
  FloatRegister number = ToFloatRegister(lir->number());
  Register output = ToRegister(lir->output());
  FloatRegister temp = ToFloatRegister(lir->temp0());

  Label bail;
  // NaN compares unordered with itself; such input can't be handled here.
  masm.branchDouble(Assembler::DoubleUnordered, number, number, &bail);
  masm.branchTruncateDoubleToInt32(number, output, &bail);

  // A zero truncation result needs extra scrutiny.
  Label ok;
  masm.branch32(Assembler::NotEqual, output, Imm32(0), &ok);
  {
    // Accept both +0 and -0 and return 0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleEqual, number, temp, &ok);

    // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
    masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, temp);
    masm.branchDouble(Assembler::DoubleLessThan, number, temp, &bail);
  }
  masm.bind(&ok);

  bailoutFrom(&bail, lir->snapshot());
}
  12888 
  12889 void CodeGenerator::visitFloor(LFloor* lir) {
  12890  FloatRegister input = ToFloatRegister(lir->input());
  12891  Register output = ToRegister(lir->output());
  12892 
  12893  Label bail;
  12894  masm.floorDoubleToInt32(input, output, &bail);
  12895  bailoutFrom(&bail, lir->snapshot());
  12896 }
  12897 
  12898 void CodeGenerator::visitFloorF(LFloorF* lir) {
  12899  FloatRegister input = ToFloatRegister(lir->input());
  12900  Register output = ToRegister(lir->output());
  12901 
  12902  Label bail;
  12903  masm.floorFloat32ToInt32(input, output, &bail);
  12904  bailoutFrom(&bail, lir->snapshot());
  12905 }
  12906 
  12907 void CodeGenerator::visitCeil(LCeil* lir) {
  12908  FloatRegister input = ToFloatRegister(lir->input());
  12909  Register output = ToRegister(lir->output());
  12910 
  12911  Label bail;
  12912  masm.ceilDoubleToInt32(input, output, &bail);
  12913  bailoutFrom(&bail, lir->snapshot());
  12914 }
  12915 
  12916 void CodeGenerator::visitCeilF(LCeilF* lir) {
  12917  FloatRegister input = ToFloatRegister(lir->input());
  12918  Register output = ToRegister(lir->output());
  12919 
  12920  Label bail;
  12921  masm.ceilFloat32ToInt32(input, output, &bail);
  12922  bailoutFrom(&bail, lir->snapshot());
  12923 }
  12924 
  12925 void CodeGenerator::visitRound(LRound* lir) {
  12926  FloatRegister input = ToFloatRegister(lir->input());
  12927  FloatRegister temp = ToFloatRegister(lir->temp0());
  12928  Register output = ToRegister(lir->output());
  12929 
  12930  Label bail;
  12931  masm.roundDoubleToInt32(input, output, temp, &bail);
  12932  bailoutFrom(&bail, lir->snapshot());
  12933 }
  12934 
  12935 void CodeGenerator::visitRoundF(LRoundF* lir) {
  12936  FloatRegister input = ToFloatRegister(lir->input());
  12937  FloatRegister temp = ToFloatRegister(lir->temp0());
  12938  Register output = ToRegister(lir->output());
  12939 
  12940  Label bail;
  12941  masm.roundFloat32ToInt32(input, output, temp, &bail);
  12942  bailoutFrom(&bail, lir->snapshot());
  12943 }
  12944 
  12945 void CodeGenerator::visitTrunc(LTrunc* lir) {
  12946  FloatRegister input = ToFloatRegister(lir->input());
  12947  Register output = ToRegister(lir->output());
  12948 
  12949  Label bail;
  12950  masm.truncDoubleToInt32(input, output, &bail);
  12951  bailoutFrom(&bail, lir->snapshot());
  12952 }
  12953 
  12954 void CodeGenerator::visitTruncF(LTruncF* lir) {
  12955  FloatRegister input = ToFloatRegister(lir->input());
  12956  Register output = ToRegister(lir->output());
  12957 
  12958  Label bail;
  12959  masm.truncFloat32ToInt32(input, output, &bail);
  12960  bailoutFrom(&bail, lir->snapshot());
  12961 }
  12962 
  12963 void CodeGenerator::visitNearbyInt(LNearbyInt* lir) {
  12964  FloatRegister input = ToFloatRegister(lir->input());
  12965  FloatRegister output = ToFloatRegister(lir->output());
  12966 
  12967  RoundingMode roundingMode = lir->mir()->roundingMode();
  12968  masm.nearbyIntDouble(roundingMode, input, output);
  12969 }
  12970 
  12971 void CodeGenerator::visitNearbyIntF(LNearbyIntF* lir) {
  12972  FloatRegister input = ToFloatRegister(lir->input());
  12973  FloatRegister output = ToFloatRegister(lir->output());
  12974 
  12975  RoundingMode roundingMode = lir->mir()->roundingMode();
  12976  masm.nearbyIntFloat32(roundingMode, input, output);
  12977 }
  12978 
  12979 void CodeGenerator::visitRoundToDouble(LRoundToDouble* lir) {
  12980  FloatRegister input = ToFloatRegister(lir->input());
  12981  FloatRegister output = ToFloatRegister(lir->output());
  12982 
  12983  masm.roundDouble(input, output);
  12984 }
  12985 
  12986 void CodeGenerator::visitRoundToFloat32(LRoundToFloat32* lir) {
  12987  FloatRegister input = ToFloatRegister(lir->input());
  12988  FloatRegister output = ToFloatRegister(lir->output());
  12989 
  12990  masm.roundFloat32(input, output);
  12991 }
  12992 
  12993 void CodeGenerator::visitCopySignF(LCopySignF* lir) {
  12994  FloatRegister lhs = ToFloatRegister(lir->lhs());
  12995  FloatRegister rhs = ToFloatRegister(lir->rhs());
  12996  FloatRegister out = ToFloatRegister(lir->output());
  12997 
  12998  if (lhs == rhs) {
  12999    if (lhs != out) {
  13000      masm.moveFloat32(lhs, out);
  13001    }
  13002    return;
  13003  }
  13004 
  13005  masm.copySignFloat32(lhs, rhs, out);
  13006 }
  13007 
  13008 void CodeGenerator::visitCopySignD(LCopySignD* lir) {
  13009  FloatRegister lhs = ToFloatRegister(lir->lhs());
  13010  FloatRegister rhs = ToFloatRegister(lir->rhs());
  13011  FloatRegister out = ToFloatRegister(lir->output());
  13012 
  13013  if (lhs == rhs) {
  13014    if (lhs != out) {
  13015      masm.moveDouble(lhs, out);
  13016    }
  13017    return;
  13018  }
  13019 
  13020  masm.copySignDouble(lhs, rhs, out);
  13021 }
  13022 
void CodeGenerator::visitCompareS(LCompareS* lir) {
  // Compare two strings with the equality/relational operator |op|. The
  // inline masm path handles the cases it can; each operator gets a
  // matching out-of-line VM fallback, with operands swapped for Le/Gt so
  // only two comparison kinds are needed.
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Lt) {
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(left, right), StoreRegisterTo(output));
  } else if (op == JSOp::Le) {
    // Push the operands in reverse order for JSOp::Le:
    // - |left <= right| is implemented as |right >= left|.
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(right, left), StoreRegisterTo(output));
  } else if (op == JSOp::Gt) {
    // Push the operands in reverse order for JSOp::Gt:
    // - |left > right| is implemented as |right < left|.
    ool = oolCallVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(
        lir, ArgList(right, left), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ge);
    ool =
        oolCallVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(
            lir, ArgList(left, right), StoreRegisterTo(output));
  }

  // Inline comparison; jumps to |ool->entry()| when the VM must decide.
  masm.compareStrings(op, left, right, output, ool->entry());

  masm.bind(ool->rejoin());
}
  13063 
// Emit an inline (strict) equality comparison of a string register |input|
// against the constant string |str|. Fast paths handled inline: pointer
// identity, atom-bit mismatch, encoding mismatch, length mismatch, fast
// atomization, and character-by-character comparison. Anything the inline
// paths cannot decide falls back to an out-of-line VM call to StringsEqual.
void CodeGenerator::visitCompareSInline(LCompareSInline* lir) {
  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsEqualityOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

  const JSOffThreadAtom* str = lir->constant();
  // Comparisons against the empty string are expected to have been handled
  // elsewhere, so the length checks below can assume a non-empty constant.
  MOZ_ASSERT(str->length() > 0);

  OutOfLineCode* ool = nullptr;

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  } else {
    MOZ_ASSERT(op == JSOp::Ne || op == JSOp::StrictNe);
    ool = oolCallVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(
        lir, ArgList(ImmGCPtr(str), input), StoreRegisterTo(output));
  }

  Label compareChars;
  {
    Label notPointerEqual;

    // If operands point to the same instance, the strings are trivially equal.
    masm.branchPtr(Assembler::NotEqual, input, ImmGCPtr(str), &notPointerEqual);
    masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
    masm.jump(ool->rejoin());

    masm.bind(&notPointerEqual);

    Label setNotEqualResult;

    if (str->isAtom()) {
      // Atoms cannot be equal to each other if they point to different strings.
      Imm32 atomBit(JSString::ATOM_BIT);
      masm.branchTest32(Assembler::NonZero,
                        Address(input, JSString::offsetOfFlags()), atomBit,
                        &setNotEqualResult);
    }

    if (str->hasTwoByteChars()) {
      // Pure two-byte strings can't be equal to Latin-1 strings.
      JS::AutoCheckCannotGC nogc;
      if (!mozilla::IsUtf16Latin1(str->twoByteRange(nogc))) {
        masm.branchLatin1String(input, &setNotEqualResult);
      }
    }

    // Strings of different length can never be equal.
    masm.branch32(Assembler::NotEqual,
                  Address(input, JSString::offsetOfLength()),
                  Imm32(str->length()), &setNotEqualResult);

    if (str->isAtom()) {
      Label forwardedPtrEqual;
      // Try to atomize |input| without a VM call; on failure this jumps to
      // |compareChars| to fall back to the character comparison below.
      masm.tryFastAtomize(input, output, output, &compareChars);

      // We now have two atoms. Just check pointer equality.
      masm.branchPtr(Assembler::Equal, output, ImmGCPtr(str),
                     &forwardedPtrEqual);

      masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
      masm.jump(ool->rejoin());

      masm.bind(&forwardedPtrEqual);
      masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
      masm.jump(ool->rejoin());
    } else {
      masm.jump(&compareChars);
    }

    masm.bind(&setNotEqualResult);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
    masm.jump(ool->rejoin());
  }

  masm.bind(&compareChars);

  // Load the input string's characters.
  Register stringChars = output;
  masm.loadStringCharsForCompare(input, str, stringChars, ool->entry());

  // Start comparing character by character.
  masm.compareStringChars(op, stringChars, str, output);

  masm.bind(ool->rejoin());
}
  13154 
// Emit a relational comparison of a string |input| against a constant
// single-character string. The result is decided by the input's first
// character; when the first characters match, only the lengths need to be
// compared (the longer string compares greater). Ropes are unwound to their
// left-most child to reach the first character without flattening.
void CodeGenerator::visitCompareSSingle(LCompareSSingle* lir) {
  JSOp op = lir->jsop();
  MOZ_ASSERT(IsRelationalOp(op));

  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  const JSOffThreadAtom* str = lir->constant();
  MOZ_ASSERT(str->length() == 1);

  char16_t ch = str->latin1OrTwoByteChar(0);

  masm.movePtr(input, temp);

  // Check if the string is empty.
  Label compareLength;
  masm.branch32(Assembler::Equal, Address(temp, JSString::offsetOfLength()),
                Imm32(0), &compareLength);

  // The first character is in the left-most rope child.
  Label notRope;
  masm.branchIfNotRope(temp, &notRope);
  {
    // Unwind ropes at the start if possible.
    Label unwindRope;
    masm.bind(&unwindRope);
    masm.loadRopeLeftChild(temp, output);
    masm.movePtr(output, temp);

#ifdef DEBUG
    Label notEmpty;
    masm.branch32(Assembler::NotEqual,
                  Address(temp, JSString::offsetOfLength()), Imm32(0),
                  &notEmpty);
    masm.assumeUnreachable("rope children are non-empty");
    masm.bind(&notEmpty);
#endif

    // Otherwise keep unwinding ropes.
    masm.branchIfRope(temp, &unwindRope);
  }
  masm.bind(&notRope);

  // Load the first character into |output|.
  auto loadFirstChar = [&](auto encoding) {
    masm.loadStringChars(temp, output, encoding);
    masm.loadChar(Address(output, 0), output, encoding);
  };

  Label done;
  if (ch <= JSString::MAX_LATIN1_CHAR) {
    // Handle both encodings when the search character is Latin-1.
    Label twoByte, compare;
    masm.branchTwoByteString(temp, &twoByte);

    loadFirstChar(CharEncoding::Latin1);
    masm.jump(&compare);

    masm.bind(&twoByte);
    loadFirstChar(CharEncoding::TwoByte);

    masm.bind(&compare);
  } else {
    // The search character is a two-byte character, so it can't be equal to any
    // character of a Latin-1 string.
    masm.move32(Imm32(int32_t(op == JSOp::Lt || op == JSOp::Le)), output);
    masm.branchLatin1String(temp, &done);

    loadFirstChar(CharEncoding::TwoByte);
  }

  // Compare the string length when the search character is equal to the
  // input's first character.
  masm.branch32(Assembler::Equal, output, Imm32(ch), &compareLength);

  // Otherwise compute the result and jump to the end.
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false), output, Imm32(ch),
                output);
  masm.jump(&done);

  // Compare the string length to compute the overall result. This also
  // covers the empty-string entry above: length 0 vs. the constant's 1.
  masm.bind(&compareLength);
  masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                Address(input, JSString::offsetOfLength()), Imm32(1), output);

  masm.bind(&done);
}
  13243 
// Emit a comparison of two BigInt registers. |equalBigInts| compares sign,
// digit length, and digits in turn; for equality ops any mismatch goes to a
// single not-equal exit, while for relational ops each mismatch kind has its
// own continuation that derives the ordering from where the mismatch occurred.
void CodeGenerator::visitCompareBigInt(LCompareBigInt* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register output = ToRegister(lir->output());

  Label notSame;
  Label compareSign;
  Label compareLength;
  Label compareDigit;

  // For equality ops all three mismatch exits collapse into |notSame|;
  // relational ops need to distinguish which component differed.
  Label* notSameSign;
  Label* notSameLength;
  Label* notSameDigit;
  if (IsEqualityOp(op)) {
    notSameSign = &notSame;
    notSameLength = &notSame;
    notSameDigit = &notSame;
  } else {
    notSameSign = &compareSign;
    notSameLength = &compareLength;
    notSameDigit = &compareDigit;
  }

  masm.equalBigInts(left, right, temp0, temp1, temp2, output, notSameSign,
                    notSameLength, notSameDigit);

  // Fall-through: the BigInts are equal. Equal operands satisfy Eq/StrictEq
  // and the non-strict relational ops Le/Ge.
  Label done;
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
                    op == JSOp::Ge),
              output);
  masm.jump(&done);

  if (IsEqualityOp(op)) {
    masm.bind(&notSame);
    masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  } else {
    Label invertWhenNegative;

    // There are two cases when sign(left) != sign(right):
    // 1. sign(left) = positive and sign(right) = negative,
    // 2. or the dual case with reversed signs.
    //
    // For case 1, |left| <cmp> |right| is true for cmp=Gt or cmp=Ge and false
    // for cmp=Lt or cmp=Le. Initialize the result for case 1 and handle case 2
    // with |invertWhenNegative|.
    masm.bind(&compareSign);
    masm.move32(Imm32(op == JSOp::Gt || op == JSOp::Ge), output);
    masm.jump(&invertWhenNegative);

    // For sign(left) = sign(right) and len(digits(left)) != len(digits(right)),
    // we have to consider the two cases:
    // 1. len(digits(left)) < len(digits(right))
    // 2. len(digits(left)) > len(digits(right))
    //
    // For |left| <cmp> |right| with cmp=Lt:
    // Assume both BigInts are positive, then |left < right| is true for case 1
    // and false for case 2. When both are negative, the result is reversed.
    //
    // The other comparison operators can be handled similarly.
    //
    // |temp0| holds the digits length of the right-hand side operand.
    masm.bind(&compareLength);
    masm.cmp32Set(JSOpToCondition(op, /* isSigned = */ false),
                  Address(left, BigInt::offsetOfLength()), temp0, output);
    masm.jump(&invertWhenNegative);

    // Similar to the case above, compare the current digit to determine the
    // overall comparison result.
    //
    // |temp1| points to the current digit of the left-hand side operand.
    // |output| holds the current digit of the right-hand side operand.
    masm.bind(&compareDigit);
    masm.cmpPtrSet(JSOpToCondition(op, /* isSigned = */ false),
                   Address(temp1, 0), output, output);

    // Shared tail: flip the magnitude-based result when both operands are
    // negative (the sign-mismatch path also reuses this for its case 2).
    Label nonNegative;
    masm.bind(&invertWhenNegative);
    masm.branchIfBigIntIsNonNegative(left, &nonNegative);
    masm.xor32(Imm32(1), output);
    masm.bind(&nonNegative);
  }

  masm.bind(&done);
}
  13332 
  13333 void CodeGenerator::visitCompareBigIntInt32(LCompareBigIntInt32* lir) {
  13334  JSOp op = lir->mir()->jsop();
  13335  Register left = ToRegister(lir->left());
  13336  Register temp0 = ToRegister(lir->temp0());
  13337  Register temp1 = ToTempRegisterOrInvalid(lir->temp1());
  13338  Register output = ToRegister(lir->output());
  13339 
  13340  Label ifTrue, ifFalse;
  13341  if (lir->right()->isConstant()) {
  13342    MOZ_ASSERT(temp1 == InvalidReg);
  13343 
  13344    Imm32 right = Imm32(ToInt32(lir->right()));
  13345    masm.compareBigIntAndInt32(op, left, right, temp0, &ifTrue, &ifFalse);
  13346  } else {
  13347    MOZ_ASSERT(temp1 != InvalidReg);
  13348 
  13349    Register right = ToRegister(lir->right());
  13350    masm.compareBigIntAndInt32(op, left, right, temp0, temp1, &ifTrue,
  13351                               &ifFalse);
  13352  }
  13353 
  13354  Label done;
  13355  masm.bind(&ifFalse);
  13356  masm.move32(Imm32(0), output);
  13357  masm.jump(&done);
  13358  masm.bind(&ifTrue);
  13359  masm.move32(Imm32(1), output);
  13360  masm.bind(&done);
  13361 }
  13362 
// Branch form of the BigInt/int32 comparison: jump directly to the successor
// blocks instead of materializing a boolean.
void CodeGenerator::visitCompareBigIntInt32AndBranch(
    LCompareBigIntInt32AndBranch* lir) {
  JSOp op = lir->cmpMir()->jsop();
  Register left = ToRegister(lir->left());
  Register temp1 = ToRegister(lir->temp0());
  Register temp2 = ToTempRegisterOrInvalid(lir->temp1());

  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  // compareBigIntAndInt32 falls through to the false case. If the next block
  // is the true case, negate the comparison so we can fall through.
  if (isNextBlock(lir->ifTrue()->lir())) {
    op = NegateCompareOp(op);
    std::swap(ifTrue, ifFalse);
  }

  if (lir->right()->isConstant()) {
    // The immediate form of the masm helper needs only one temp register.
    MOZ_ASSERT(temp2 == InvalidReg);

    Imm32 right = Imm32(ToInt32(lir->right()));
    masm.compareBigIntAndInt32(op, left, right, temp1, ifTrue, ifFalse);
  } else {
    MOZ_ASSERT(temp2 != InvalidReg);

    Register right = ToRegister(lir->right());
    masm.compareBigIntAndInt32(op, left, right, temp1, temp2, ifTrue, ifFalse);
  }

  // If we couldn't arrange a fall-through above, emit the explicit jump to
  // the (possibly swapped) false target.
  if (!isNextBlock(lir->ifTrue()->lir())) {
    jumpToBlock(lir->ifFalse());
  }
}
  13396 
// Compare a BigInt against a double via an ABI call to the appropriate
// BigInt/Number comparison helper. Le and Gt have no direct helpers; they are
// expressed through the mirrored Number/BigInt helpers with swapped operands.
void CodeGenerator::visitCompareBigIntDouble(LCompareBigIntDouble* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  FloatRegister right = ToFloatRegister(lir->right());
  Register output = ToRegister(lir->output());

  masm.setupAlignedABICall();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(right, ABIType::Float64);
    masm.passABIArg(left);
  } else {
    masm.passABIArg(left);
    masm.passABIArg(right, ABIType::Float64);
  }

  // Two helper signatures, matching the two possible argument orders above.
  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(output);
}
  13457 
// Compare a BigInt against a string via a VM call. As in the BigInt/double
// case, Le and Gt are expressed through the mirrored String/BigInt helpers,
// which is why the push order of the operands depends on the op. Note that
// pushArg pushes in reverse: the last push is the first callee argument.
void CodeGenerator::visitCompareBigIntString(LCompareBigIntString* lir) {
  JSOp op = lir->mir()->jsop();
  Register left = ToRegister(lir->left());
  Register right = ToRegister(lir->right());

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    pushArg(left);
    pushArg(right);
  } else {
    pushArg(right);
    pushArg(left);
  }

  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callVM<FnBigIntString, BigIntStringEqual<Equal>>(lir);
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callVM<FnBigIntString, BigIntStringEqual<NotEqual>>(lir);
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnBigIntString, BigIntStringCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Gt: {
      // |left > right| computed as |right < left| with swapped operand types.
      constexpr auto LessThan = ComparisonKind::LessThan;
      callVM<FnStringBigInt, StringBigIntCompare<LessThan>>(lir);
      break;
    }
    case JSOp::Le: {
      // |left <= right| computed as |right >= left| with swapped operands.
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callVM<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>(lir);
      break;
    }
    default:
      MOZ_CRASH("Unexpected compare op");
  }
}
  13514 
// Loose equality of a boxed value against null/undefined. Null and undefined
// are loosely equal to each other and to objects that "emulate undefined"
// (e.g. document.all). When the emulates-undefined fuse is intact no object
// can emulate undefined, so the object test is skipped (kept only as an
// assertion in DEBUG/FUZZING builds).
void CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->mir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  ValueOperand value = ToValue(lir->value());
  Register output = ToRegister(lir->output());

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  if (!intact) {
    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
    addOutOfLineCode(ool, lir->mir());

    Label* nullOrLikeUndefined = ool->label1();
    Label* notNullOrLikeUndefined = ool->label2();

    {
      ScratchTagScope tag(masm, value);
      masm.splitTagForTest(value, tag);

      masm.branchTestNull(Assembler::Equal, tag, nullOrLikeUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, nullOrLikeUndefined);

      // Check whether it's a truthy object or a falsy object that emulates
      // undefined.
      masm.branchTestObject(Assembler::NotEqual, tag, notNullOrLikeUndefined);
    }

    Register objreg =
        masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
    branchTestObjectEmulatesUndefined(objreg, nullOrLikeUndefined,
                                      notNullOrLikeUndefined, output, ool);
    // fall through

    Label done;

    // It's not null or undefined, and if it's an object it doesn't
    // emulate undefined, so it's not like undefined.
    masm.move32(Imm32(op == JSOp::Ne), output);
    masm.jump(&done);

    masm.bind(nullOrLikeUndefined);
    masm.move32(Imm32(op == JSOp::Eq), output);

    // Both branches meet here.
    masm.bind(&done);
  } else {
    // Fuse intact: only an actual null/undefined tag can compare equal.
    Label nullOrUndefined, notNullOrLikeUndefined;
#if defined(DEBUG) || defined(FUZZING)
    Register objreg = Register::Invalid();
#endif
    {
      ScratchTagScope tag(masm, value);
      masm.splitTagForTest(value, tag);

      masm.branchTestNull(Assembler::Equal, tag, &nullOrUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);

#if defined(DEBUG) || defined(FUZZING)
      // Check whether it's a truthy object or a falsy object that emulates
      // undefined.
      masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);
      objreg = masm.extractObject(value, ToTempUnboxRegister(lir->temp0()));
#endif
    }

#if defined(DEBUG) || defined(FUZZING)
    // With the fuse intact no object may emulate undefined; verify that.
    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
    masm.bind(&notNullOrLikeUndefined);
#endif

    Label done;

    // It's not null or undefined, and if it's an object it doesn't
    // emulate undefined.
    masm.move32(Imm32(op == JSOp::Ne), output);
    masm.jump(&done);

    masm.bind(&nullOrUndefined);
    masm.move32(Imm32(op == JSOp::Eq), output);

    // Both branches meet here.
    masm.bind(&done);
  }
}
  13602 
// Branch form of the loose null/undefined comparison on a boxed value. A
// JSOp::Ne comparison is handled by swapping the successors and testing for
// equality. See visitIsNullOrLikeUndefinedV for the emulates-undefined fuse.
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchV(
    LIsNullOrLikeUndefinedAndBranchV* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op));

  ValueOperand value = ToValue(lir->value());

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();

  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  // The object payload is only needed when the emulates-undefined test runs
  // (fuse popped), or to feed the assertion in DEBUG/FUZZING builds.
  bool extractObject = !intact;
  Register objreg = Register::Invalid();
#if defined(DEBUG) || defined(FUZZING)
  // always extract objreg if we're in debug and
  // assertObjectDoesNotEmulateUndefined;
  extractObject = true;
#endif

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestNull(Assembler::Equal, tag, ifTrueLabel);
    masm.branchTestUndefined(Assembler::Equal, tag, ifTrueLabel);

    if (extractObject) {
      // Non-object, non-null/undefined values can never compare equal.
      masm.branchTestObject(Assembler::NotEqual, tag, ifFalseLabel);
      objreg = masm.extractObject(value, ToTempUnboxRegister(lir->temp1()));
    }
  }

  Register scratch = ToRegister(lir->temp0());
  if (!intact) {
    // Objects that emulate undefined are loosely equal to null/undefined.
    OutOfLineTestObject* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->cmpMir());
    testObjectEmulatesUndefined(objreg, ifTrueLabel, ifFalseLabel, scratch,
                                ool);
  } else {
    assertObjectDoesNotEmulateUndefined(objreg, scratch, lir->cmpMir());
    // Bug 1874905. This would be nice to optimize out at the MIR level.
    if (!isNextBlock(ifFalse->lir())) {
      masm.jump(ifFalseLabel);
    }
  }
}
  13662 
// Loose null/undefined comparison where the operand is statically known to
// be an object: the result depends solely on whether the object emulates
// undefined. With the fuse intact the answer is a constant (Ne => true).
void CodeGenerator::visitIsNullOrLikeUndefinedT(LIsNullOrLikeUndefinedT* lir) {
  MOZ_ASSERT(lir->mir()->compareType() == MCompare::Compare_Undefined ||
             lir->mir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->mir()->lhs()->type() == MIRType::Object);

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  JSOp op = lir->mir()->jsop();
  Register output = ToRegister(lir->output());
  Register objreg = ToRegister(lir->input());
  if (!intact) {
    MOZ_ASSERT(IsLooseEqualityOp(op),
               "Strict equality should have been folded");

    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
    addOutOfLineCode(ool, lir->mir());

    Label* emulatesUndefined = ool->label1();
    Label* doesntEmulateUndefined = ool->label2();

    branchTestObjectEmulatesUndefined(objreg, emulatesUndefined,
                                      doesntEmulateUndefined, output, ool);

    Label done;

    // Does not emulate undefined: equal only for Ne.
    masm.move32(Imm32(op == JSOp::Ne), output);
    masm.jump(&done);

    masm.bind(emulatesUndefined);
    masm.move32(Imm32(op == JSOp::Eq), output);
    masm.bind(&done);
  } else {
    // Fuse intact: no object emulates undefined, so the result is constant.
    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
    masm.move32(Imm32(op == JSOp::Ne), output);
  }
}
  13698 
// Branch form of the object-typed loose null/undefined comparison: jump to
// the true successor iff the object emulates undefined (always false when
// the emulates-undefined fuse is intact).
void CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(
    LIsNullOrLikeUndefinedAndBranchT* lir) {
  MOZ_ASSERT(lir->cmpMir()->compareType() == MCompare::Compare_Undefined ||
             lir->cmpMir()->compareType() == MCompare::Compare_Null);
  MOZ_ASSERT(lir->cmpMir()->lhs()->type() == MIRType::Object);

  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();

  JSOp op = lir->cmpMir()->jsop();
  MOZ_ASSERT(IsLooseEqualityOp(op), "Strict equality should have been folded");

  MBasicBlock* ifTrue = lir->ifTrue();
  MBasicBlock* ifFalse = lir->ifFalse();

  if (op == JSOp::Ne) {
    // Swap branches.
    std::swap(ifTrue, ifFalse);
  }

  Register input = ToRegister(lir->value());
  Register scratch = ToRegister(lir->temp0());
  Label* ifTrueLabel = getJumpLabelForBranch(ifTrue);
  Label* ifFalseLabel = getJumpLabelForBranch(ifFalse);

  if (intact) {
    // Bug 1874905. Ideally branches like this would be optimized out.
    // NOTE(review): the sibling visitors pass lir->cmpMir() here; this one
    // passes lir->mir() — confirm both resolve to the same MIR node.
    assertObjectDoesNotEmulateUndefined(input, scratch, lir->mir());
    masm.jump(ifFalseLabel);
  } else {
    auto* ool = new (alloc()) OutOfLineTestObject();
    addOutOfLineCode(ool, lir->cmpMir());

    // Objects that emulate undefined are loosely equal to null/undefined.
    testObjectEmulatesUndefined(input, ifTrueLabel, ifFalseLabel, scratch, ool);
  }
}
  13735 
  13736 void CodeGenerator::visitIsNull(LIsNull* lir) {
  13737  MCompare::CompareType compareType = lir->mir()->compareType();
  13738  MOZ_ASSERT(compareType == MCompare::Compare_Null);
  13739 
  13740  JSOp op = lir->mir()->jsop();
  13741  MOZ_ASSERT(IsStrictEqualityOp(op));
  13742 
  13743  ValueOperand value = ToValue(lir->value());
  13744  Register output = ToRegister(lir->output());
  13745 
  13746  Assembler::Condition cond = JSOpToCondition(compareType, op);
  13747  masm.testNullSet(cond, value, output);
  13748 }
  13749 
  13750 void CodeGenerator::visitIsUndefined(LIsUndefined* lir) {
  13751  MCompare::CompareType compareType = lir->mir()->compareType();
  13752  MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
  13753 
  13754  JSOp op = lir->mir()->jsop();
  13755  MOZ_ASSERT(IsStrictEqualityOp(op));
  13756 
  13757  ValueOperand value = ToValue(lir->value());
  13758  Register output = ToRegister(lir->output());
  13759 
  13760  Assembler::Condition cond = JSOpToCondition(compareType, op);
  13761  masm.testUndefinedSet(cond, value, output);
  13762 }
  13763 
  13764 void CodeGenerator::visitIsNullAndBranch(LIsNullAndBranch* lir) {
  13765  MCompare::CompareType compareType = lir->cmpMir()->compareType();
  13766  MOZ_ASSERT(compareType == MCompare::Compare_Null);
  13767 
  13768  JSOp op = lir->cmpMir()->jsop();
  13769  MOZ_ASSERT(IsStrictEqualityOp(op));
  13770 
  13771  ValueOperand value = ToValue(lir->value());
  13772 
  13773  Assembler::Condition cond = JSOpToCondition(compareType, op);
  13774 
  13775  MBasicBlock* ifTrue = lir->ifTrue();
  13776  MBasicBlock* ifFalse = lir->ifFalse();
  13777 
  13778  if (isNextBlock(ifFalse->lir())) {
  13779    masm.branchTestNull(cond, value, getJumpLabelForBranch(ifTrue));
  13780  } else {
  13781    masm.branchTestNull(Assembler::InvertCondition(cond), value,
  13782                        getJumpLabelForBranch(ifFalse));
  13783    jumpToBlock(ifTrue);
  13784  }
  13785 }
  13786 
  13787 void CodeGenerator::visitIsUndefinedAndBranch(LIsUndefinedAndBranch* lir) {
  13788  MCompare::CompareType compareType = lir->cmpMir()->compareType();
  13789  MOZ_ASSERT(compareType == MCompare::Compare_Undefined);
  13790 
  13791  JSOp op = lir->cmpMir()->jsop();
  13792  MOZ_ASSERT(IsStrictEqualityOp(op));
  13793 
  13794  ValueOperand value = ToValue(lir->value());
  13795 
  13796  Assembler::Condition cond = JSOpToCondition(compareType, op);
  13797 
  13798  MBasicBlock* ifTrue = lir->ifTrue();
  13799  MBasicBlock* ifFalse = lir->ifFalse();
  13800 
  13801  if (isNextBlock(ifFalse->lir())) {
  13802    masm.branchTestUndefined(cond, value, getJumpLabelForBranch(ifTrue));
  13803  } else {
  13804    masm.branchTestUndefined(Assembler::InvertCondition(cond), value,
  13805                             getJumpLabelForBranch(ifFalse));
  13806    jumpToBlock(ifTrue);
  13807  }
  13808 }
  13809 
  13810 void CodeGenerator::visitSameValueDouble(LSameValueDouble* lir) {
  13811  FloatRegister left = ToFloatRegister(lir->left());
  13812  FloatRegister right = ToFloatRegister(lir->right());
  13813  FloatRegister temp = ToFloatRegister(lir->temp0());
  13814  Register output = ToRegister(lir->output());
  13815 
  13816  masm.sameValueDouble(left, right, temp, output);
  13817 }
  13818 
  13819 void CodeGenerator::visitSameValue(LSameValue* lir) {
  13820  ValueOperand lhs = ToValue(lir->left());
  13821  ValueOperand rhs = ToValue(lir->right());
  13822  Register output = ToRegister(lir->output());
  13823 
  13824  using Fn = bool (*)(JSContext*, const Value&, const Value&, bool*);
  13825  OutOfLineCode* ool =
  13826      oolCallVM<Fn, SameValue>(lir, ArgList(lhs, rhs), StoreRegisterTo(output));
  13827 
  13828  // First check to see if the values have identical bits.
  13829  // This is correct for SameValue because SameValue(NaN,NaN) is true,
  13830  // and SameValue(0,-0) is false.
  13831  masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
  13832                ool->entry());
  13833  masm.move32(Imm32(1), output);
  13834 
  13835  // If this fails, call SameValue.
  13836  masm.bind(ool->rejoin());
  13837 }
  13838 
  13839 void CodeGenerator::emitConcat(LInstruction* lir, Register lhs, Register rhs,
  13840                               Register output) {
  13841  using Fn =
  13842      JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
  13843  OutOfLineCode* ool = oolCallVM<Fn, ConcatStrings<CanGC>>(
  13844      lir, ArgList(lhs, rhs, static_cast<Imm32>(int32_t(gc::Heap::Default))),
  13845      StoreRegisterTo(output));
  13846 
  13847  JitCode* stringConcatStub =
  13848      snapshot_->getZoneStub(JitZone::StubKind::StringConcat);
  13849  masm.call(stringConcatStub);
  13850  masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
  13851 
  13852  masm.bind(ool->rejoin());
  13853 }
  13854 
  13855 void CodeGenerator::visitConcat(LConcat* lir) {
  13856  Register lhs = ToRegister(lir->lhs());
  13857  Register rhs = ToRegister(lir->rhs());
  13858 
  13859  Register output = ToRegister(lir->output());
  13860 
  13861  MOZ_ASSERT(lhs == CallTempReg0);
  13862  MOZ_ASSERT(rhs == CallTempReg1);
  13863  MOZ_ASSERT(ToRegister(lir->temp0()) == CallTempReg0);
  13864  MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg1);
  13865  MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg2);
  13866  MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg3);
  13867  MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg4);
  13868  MOZ_ASSERT(output == CallTempReg5);
  13869 
  13870  emitConcat(lir, lhs, rhs, output);
  13871 }
  13872 
// Emit code that copies |len| characters from |from| to |to|, converting from
// |fromEncoding| to |toEncoding|. Narrowing (TwoByte -> Latin1) is not
// supported. |from|, |to| and |len| are clobbered; |byteOpScratch| is used for
// the individual loads/stores. When the caller statically knows an upper bound
// on |len|, passing it as |maximumLength| enables wider copies and loop
// unrolling.
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
                            Register len, Register byteOpScratch,
                            CharEncoding fromEncoding, CharEncoding toEncoding,
                            size_t maximumLength = SIZE_MAX) {
  // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
  // (checked below in debug builds), and when done |to| must point to the
  // next available char.

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
  masm.assumeUnreachable("Length should be greater than 0.");
  masm.bind(&ok);

  if (maximumLength != SIZE_MAX) {
    MOZ_ASSERT(maximumLength <= INT32_MAX, "maximum length fits into int32");

    Label ok;
    masm.branchPtr(Assembler::BelowOrEqual, len, Imm32(maximumLength), &ok);
    masm.assumeUnreachable("Length should not exceed maximum length.");
    masm.bind(&ok);
  }
#endif

  // Narrowing two-byte chars to Latin-1 is not supported by this helper.
  MOZ_ASSERT_IF(toEncoding == CharEncoding::Latin1,
                fromEncoding == CharEncoding::Latin1);

  size_t fromWidth =
      fromEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);
  size_t toWidth =
      toEncoding == CharEncoding::Latin1 ? sizeof(char) : sizeof(char16_t);

  // Try to copy multiple characters at once when both encoding are equal.
  if (fromEncoding == toEncoding) {
    constexpr size_t ptrWidth = sizeof(uintptr_t);

    // Copy |width| bytes and then adjust |from| and |to|.
    auto copyCharacters = [&](size_t width) {
      static_assert(ptrWidth <= 8, "switch handles only up to eight bytes");

      switch (width) {
        case 1:
          masm.load8ZeroExtend(Address(from, 0), byteOpScratch);
          masm.store8(byteOpScratch, Address(to, 0));
          break;
        case 2:
          masm.load16ZeroExtend(Address(from, 0), byteOpScratch);
          masm.store16(byteOpScratch, Address(to, 0));
          break;
        case 4:
          masm.load32(Address(from, 0), byteOpScratch);
          masm.store32(byteOpScratch, Address(to, 0));
          break;
        case 8:
          MOZ_ASSERT(width == ptrWidth);
          masm.loadPtr(Address(from, 0), byteOpScratch);
          masm.storePtr(byteOpScratch, Address(to, 0));
          break;
      }

      masm.addPtr(Imm32(width), from);
      masm.addPtr(Imm32(width), to);
    };

    // First align |len| to pointer width: for each sub-pointer width, copy
    // that many bytes iff the corresponding bit is set in |len|, so the
    // remaining length is a multiple of the characters-per-pointer count.
    Label done;
    for (size_t width = fromWidth; width < ptrWidth; width *= 2) {
      // Number of characters which fit into |width| bytes.
      size_t charsPerWidth = width / fromWidth;

      if (charsPerWidth < maximumLength) {
        Label next;
        masm.branchTest32(Assembler::Zero, len, Imm32(charsPerWidth), &next);

        copyCharacters(width);

        masm.branchSub32(Assembler::Zero, Imm32(charsPerWidth), len, &done);
        masm.bind(&next);
      } else if (charsPerWidth == maximumLength) {
        // |len| can't exceed |maximumLength|, so this copy is unconditional
        // and necessarily copies everything.
        copyCharacters(width);
        masm.sub32(Imm32(charsPerWidth), len);
      }
    }

    size_t maxInlineLength;
    if (fromEncoding == CharEncoding::Latin1) {
      maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
    } else {
      maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
    }

    // Number of characters which fit into a single register.
    size_t charsPerPtr = ptrWidth / fromWidth;

    // Unroll small loops.
    constexpr size_t unrollLoopLimit = 3;
    size_t loopCount = std::min(maxInlineLength, maximumLength) / charsPerPtr;

#ifdef JS_64BIT
    static constexpr size_t latin1MaxInlineByteLength =
        JSFatInlineString::MAX_LENGTH_LATIN1 * sizeof(char);
    static constexpr size_t twoByteMaxInlineByteLength =
        JSFatInlineString::MAX_LENGTH_TWO_BYTE * sizeof(char16_t);

    // |unrollLoopLimit| should be large enough to allow loop unrolling on
    // 64-bit targets.
    static_assert(latin1MaxInlineByteLength / ptrWidth == unrollLoopLimit,
                  "Latin-1 loops are unrolled on 64-bit");
    static_assert(twoByteMaxInlineByteLength / ptrWidth == unrollLoopLimit,
                  "Two-byte loops are unrolled on 64-bit");
#endif

    if (loopCount <= unrollLoopLimit) {
      Label labels[unrollLoopLimit];

      // Check up front how many characters can be copied.
      for (size_t i = 1; i < loopCount; i++) {
        masm.branch32(Assembler::Below, len, Imm32((i + 1) * charsPerPtr),
                      &labels[i]);
      }

      // Generate the unrolled loop body: |loopCount| pointer-wide copies;
      // the length checks above jump into the middle of this sequence so
      // exactly |len / charsPerPtr| copies execute.
      for (size_t i = loopCount; i > 0; i--) {
        copyCharacters(ptrWidth);
        masm.sub32(Imm32(charsPerPtr), len);

        // Jump target for the previous length check.
        if (i != 1) {
          masm.bind(&labels[i - 1]);
        }
      }
    } else {
      // Length too large (or unknown) for unrolling: plain copy loop.
      Label start;
      masm.bind(&start);
      copyCharacters(ptrWidth);
      masm.branchSub32(Assembler::NonZero, Imm32(charsPerPtr), len, &start);
    }

    masm.bind(&done);
  } else {
    // Different encodings (Latin1 -> TwoByte inflation): copy one character
    // at a time, letting loadChar/storeChar perform the widening.
    Label start;
    masm.bind(&start);
    masm.loadChar(Address(from, 0), byteOpScratch, fromEncoding);
    masm.storeChar(byteOpScratch, Address(to, 0), toEncoding);
    masm.addPtr(Imm32(fromWidth), from);
    masm.addPtr(Imm32(toWidth), to);
    masm.branchSub32(Assembler::NonZero, Imm32(1), len, &start);
  }
}
  14022 
  14023 static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
  14024                            Register len, Register byteOpScratch,
  14025                            CharEncoding encoding, size_t maximumLength) {
  14026  CopyStringChars(masm, to, from, len, byteOpScratch, encoding, encoding,
  14027                  maximumLength);
  14028 }
  14029 
// Emit code that copies all characters of string |input| into the two-byte
// buffer |destChars|, inflating Latin-1 sources on the fly. Clobbers |input|,
// |temp1| and |temp2|, and advances |destChars| past the copied characters.
static void CopyStringCharsMaybeInflate(MacroAssembler& masm, Register input,
                                        Register destChars, Register temp1,
                                        Register temp2) {
  // destChars is TwoByte and input is a Latin1 or TwoByte string, so we may
  // have to inflate.

  Label isLatin1, done;
  masm.loadStringLength(input, temp1);
  masm.branchLatin1String(input, &isLatin1);
  {
    // Two-byte source: straight copy, no conversion needed.
    masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
    masm.movePtr(temp2, input);  // |input| now holds the chars pointer.
    CopyStringChars(masm, destChars, input, temp1, temp2,
                    CharEncoding::TwoByte);
    masm.jump(&done);
  }
  masm.bind(&isLatin1);
  {
    // Latin-1 source: widen each char to char16_t while copying.
    masm.loadStringChars(input, temp2, CharEncoding::Latin1);
    masm.movePtr(temp2, input);  // |input| now holds the chars pointer.
    CopyStringChars(masm, destChars, input, temp1, temp2, CharEncoding::Latin1,
                    CharEncoding::TwoByte);
  }
  masm.bind(&done);
}
  14055 
// Emit code that allocates a JSThinInlineString or JSFatInlineString in
// |output| — choosing the thin variant whenever |length| fits — then stores
// the appropriate flags and the length. Jumps to |failure| if GC allocation
// fails. |temp| is clobbered. The string's character data is NOT initialized.
static void AllocateThinOrFatInlineString(MacroAssembler& masm, Register output,
                                          Register length, Register temp,
                                          gc::Heap initialStringHeap,
                                          Label* failure,
                                          CharEncoding encoding) {
#ifdef DEBUG
  // Callers must have ensured |length| fits in a fat inline string.
  size_t maxInlineLength;
  if (encoding == CharEncoding::Latin1) {
    maxInlineLength = JSFatInlineString::MAX_LENGTH_LATIN1;
  } else {
    maxInlineLength = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
  }

  Label ok;
  masm.branch32(Assembler::BelowOrEqual, length, Imm32(maxInlineLength), &ok);
  masm.assumeUnreachable("string length too large to be allocated as inline");
  masm.bind(&ok);
#endif

  size_t maxThinInlineLength;
  if (encoding == CharEncoding::Latin1) {
    maxThinInlineLength = JSThinInlineString::MAX_LENGTH_LATIN1;
  } else {
    maxThinInlineLength = JSThinInlineString::MAX_LENGTH_TWO_BYTE;
  }

  Label isFat, allocDone;
  masm.branch32(Assembler::Above, length, Imm32(maxThinInlineLength), &isFat);
  {
    // Thin inline string is large enough.
    uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
    if (encoding == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }
    masm.newGCString(output, temp, initialStringHeap, failure);
    masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
    masm.jump(&allocDone);
  }
  masm.bind(&isFat);
  {
    // Need the larger fat inline string.
    uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
    if (encoding == CharEncoding::Latin1) {
      flags |= JSString::LATIN1_CHARS_BIT;
    }
    masm.newGCFatInlineString(output, temp, initialStringHeap, failure);
    masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
  }
  masm.bind(&allocDone);

  // Store length.
  masm.store32(length, Address(output, JSString::offsetOfLength()));
}
  14107 
// Emit code that concatenates two linear strings |lhs| and |rhs| into a new
// (thin or fat) inline string in |output|. On entry |temp2| must hold the
// combined length. Jumps to |failure| for rope inputs or allocation failure.
// Clobbers |lhs|, |rhs| and all three temps.
static void ConcatInlineString(MacroAssembler& masm, Register lhs, Register rhs,
                               Register output, Register temp1, Register temp2,
                               Register temp3, gc::Heap initialStringHeap,
                               Label* failure, CharEncoding encoding) {
  JitSpew(JitSpew_Codegen, "# Emitting ConcatInlineString (encoding=%s)",
          (encoding == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

  // State: result length in temp2.

  // Ensure both strings are linear.
  masm.branchIfRope(lhs, failure);
  masm.branchIfRope(rhs, failure);

  // Allocate a JSThinInlineString or JSFatInlineString.
  AllocateThinOrFatInlineString(masm, output, temp2, temp1, initialStringHeap,
                                failure, encoding);

  // Load chars pointer in temp2.
  masm.loadInlineStringCharsForStore(output, temp2);

  // Copy |src|'s characters into the output buffer, advancing temp2. For a
  // two-byte result the source may be Latin-1 and must be inflated.
  auto copyChars = [&](Register src) {
    if (encoding == CharEncoding::TwoByte) {
      CopyStringCharsMaybeInflate(masm, src, temp2, temp1, temp3);
    } else {
      masm.loadStringLength(src, temp3);
      masm.loadStringChars(src, temp1, CharEncoding::Latin1);
      masm.movePtr(temp1, src);
      CopyStringChars(masm, temp2, src, temp3, temp1, CharEncoding::Latin1);
    }
  };

  // Copy lhs chars. Note that this advances temp2 to point to the next
  // char. This also clobbers the lhs register.
  copyChars(lhs);

  // Copy rhs chars. Clobbers the rhs register.
  copyChars(rhs);
}
  14146 
// Emit the fast paths for String.prototype.substring-style operations:
//  1. zero length        -> the empty atom
//  2. whole string       -> the input string itself
//  3. length 1 or 2      -> a static string, when one exists
//  4. short results      -> a new thin/fat inline string (chars copied)
//  5. everything else    -> a new dependent string (shares chars)
// Ropes and allocation failures go to an out-of-line SubstringKernel call.
void CodeGenerator::visitSubstr(LSubstr* lir) {
  Register string = ToRegister(lir->string());
  Register begin = ToRegister(lir->begin());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp2());

  // On x86 there are not enough registers. In that case reuse the string
  // register as temporary.
  Register temp1 =
      lir->temp1()->isBogusTemp() ? string : ToRegister(lir->temp1());

  // Static upper bound on the result length, from range analysis; used to
  // skip the fat-inline and dependent-string paths when provably short.
  size_t maximumLength = SIZE_MAX;

  Range* range = lir->mir()->length()->range();
  if (range && range->hasInt32UpperBound()) {
    MOZ_ASSERT(range->upper() >= 0);
    maximumLength = size_t(range->upper());
  }

  // The two-byte limits are the smaller ones, so comparing |maximumLength|
  // against them below is conservative for Latin-1 strings.
  static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <=
                JSThinInlineString::MAX_LENGTH_LATIN1);

  static_assert(JSFatInlineString::MAX_LENGTH_TWO_BYTE <=
                JSFatInlineString::MAX_LENGTH_LATIN1);

  bool tryFatInlineOrDependent =
      maximumLength > JSThinInlineString::MAX_LENGTH_TWO_BYTE;
  bool tryDependent = maximumLength > JSFatInlineString::MAX_LENGTH_TWO_BYTE;

#ifdef DEBUG
  if (maximumLength != SIZE_MAX) {
    Label ok;
    masm.branch32(Assembler::BelowOrEqual, length, Imm32(maximumLength), &ok);
    masm.assumeUnreachable("length should not exceed maximum length");
    masm.bind(&ok);
  }
#endif

  Label nonZero, nonInput;

  // For every edge case use the C++ variant.
  // Note: we also use this upon allocation failure in newGCString and
  // newGCFatInlineString. To squeeze out even more performance those failures
  // can be handled by allocate in ool code and returning to jit code to fill
  // in all data.
  using Fn = JSString* (*)(JSContext * cx, HandleString str, int32_t begin,
                           int32_t len);
  OutOfLineCode* ool = oolCallVM<Fn, SubstringKernel>(
      lir, ArgList(string, begin, length), StoreRegisterTo(output));
  Label* slowPath = ool->entry();
  Label* done = ool->rejoin();

  // Zero length, return emptystring.
  masm.branchTest32(Assembler::NonZero, length, length, &nonZero);
  const JSAtomState& names = gen->runtime->names();
  masm.movePtr(ImmGCPtr(names.empty_), output);
  masm.jump(done);

  // Substring from 0..|str.length|, return str.
  masm.bind(&nonZero);
  masm.branch32(Assembler::NotEqual,
                Address(string, JSString::offsetOfLength()), length, &nonInput);
#ifdef DEBUG
  {
    Label ok;
    masm.branchTest32(Assembler::Zero, begin, begin, &ok);
    masm.assumeUnreachable("length == str.length implies begin == 0");
    masm.bind(&ok);
  }
#endif
  masm.movePtr(string, output);
  masm.jump(done);

  // Use slow path for ropes.
  masm.bind(&nonInput);
  masm.branchIfRope(string, slowPath);

  // Optimize one and two character strings.
  Label nonStatic;
  masm.branch32(Assembler::Above, length, Imm32(2), &nonStatic);
  {
    Label loadLengthOne, loadLengthTwo;

    // Load the first (into temp2) and, when length == 2, second (into temp0)
    // characters, then dispatch to the matching static-string lookup.
    auto loadChars = [&](CharEncoding encoding, bool fallthru) {
      size_t size = encoding == CharEncoding::Latin1 ? sizeof(JS::Latin1Char)
                                                     : sizeof(char16_t);

      masm.loadStringChars(string, temp0, encoding);
      masm.loadChar(temp0, begin, temp2, encoding);
      masm.branch32(Assembler::Equal, length, Imm32(1), &loadLengthOne);
      masm.loadChar(temp0, begin, temp0, encoding, int32_t(size));
      if (!fallthru) {
        masm.jump(&loadLengthTwo);
      }
    };

    Label isLatin1;
    masm.branchLatin1String(string, &isLatin1);
    loadChars(CharEncoding::TwoByte, /* fallthru = */ false);

    masm.bind(&isLatin1);
    loadChars(CharEncoding::Latin1, /* fallthru = */ true);

    // Try to load a length-two static string.
    masm.bind(&loadLengthTwo);
    masm.lookupStaticString(temp2, temp0, output, gen->runtime->staticStrings(),
                            &nonStatic);
    masm.jump(done);

    // Try to load a length-one static string.
    masm.bind(&loadLengthOne);
    masm.lookupStaticString(temp2, output, gen->runtime->staticStrings(),
                            &nonStatic);
    masm.jump(done);
  }
  masm.bind(&nonStatic);

  // Allocate either a JSThinInlineString or JSFatInlineString, or jump to
  // notInline if we need a dependent string.
  Label notInline;
  {
    static_assert(JSThinInlineString::MAX_LENGTH_LATIN1 <
                  JSFatInlineString::MAX_LENGTH_LATIN1);
    static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE <
                  JSFatInlineString::MAX_LENGTH_TWO_BYTE);

    // Use temp2 to store the JS(Thin|Fat)InlineString flags. This avoids having
    // duplicate newGCString/newGCFatInlineString codegen for Latin1 vs TwoByte
    // strings.

    Label allocFat, allocDone;
    if (tryFatInlineOrDependent) {
      // Result may not fit in a thin inline string: classify by encoding and
      // length into thin-inline, fat-inline, or (when tryDependent) dependent.
      Label isLatin1, allocThin;
      masm.branchLatin1String(string, &isLatin1);
      {
        if (tryDependent) {
          masm.branch32(Assembler::Above, length,
                        Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
                        &notInline);
        }
        masm.move32(Imm32(0), temp2);
        masm.branch32(Assembler::Above, length,
                      Imm32(JSThinInlineString::MAX_LENGTH_TWO_BYTE),
                      &allocFat);
        masm.jump(&allocThin);
      }

      masm.bind(&isLatin1);
      {
        if (tryDependent) {
          masm.branch32(Assembler::Above, length,
                        Imm32(JSFatInlineString::MAX_LENGTH_LATIN1),
                        &notInline);
        }
        masm.move32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
        masm.branch32(Assembler::Above, length,
                      Imm32(JSThinInlineString::MAX_LENGTH_LATIN1), &allocFat);
      }

      masm.bind(&allocThin);
    } else {
      // Result is provably thin-inline-sized; just propagate the Latin-1 bit
      // from the input string's flags.
      masm.load32(Address(string, JSString::offsetOfFlags()), temp2);
      masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp2);
    }

    {
      masm.newGCString(output, temp0, initialStringHeap(), slowPath);
      masm.or32(Imm32(JSString::INIT_THIN_INLINE_FLAGS), temp2);
    }

    if (tryFatInlineOrDependent) {
      masm.jump(&allocDone);

      masm.bind(&allocFat);
      {
        masm.newGCFatInlineString(output, temp0, initialStringHeap(), slowPath);
        masm.or32(Imm32(JSString::INIT_FAT_INLINE_FLAGS), temp2);
      }

      masm.bind(&allocDone);
    }

    masm.store32(temp2, Address(output, JSString::offsetOfFlags()));
    masm.store32(length, Address(output, JSString::offsetOfLength()));

    // Copy the substring's characters into the freshly allocated inline
    // string. On x86 temp1 aliases |string|, which must be preserved across
    // the copy, hence the push/pop.
    auto initializeInlineString = [&](CharEncoding encoding) {
      masm.loadStringChars(string, temp0, encoding);
      masm.addToCharPtr(temp0, begin, encoding);
      if (temp1 == string) {
        masm.push(string);
      }
      masm.loadInlineStringCharsForStore(output, temp1);
      CopyStringChars(masm, temp1, temp0, length, temp2, encoding,
                      maximumLength);
      // CopyStringChars clobbered |length|; reload it from the new string.
      masm.loadStringLength(output, length);
      if (temp1 == string) {
        masm.pop(string);
      }
    };

    Label isInlineLatin1;
    masm.branchTest32(Assembler::NonZero, temp2,
                      Imm32(JSString::LATIN1_CHARS_BIT), &isInlineLatin1);
    initializeInlineString(CharEncoding::TwoByte);
    masm.jump(done);

    masm.bind(&isInlineLatin1);
    initializeInlineString(CharEncoding::Latin1);
  }

  // Handle other cases with a DependentString.
  if (tryDependent) {
    masm.jump(done);

    masm.bind(&notInline);
    masm.newGCString(output, temp0, gen->initialStringHeap(), slowPath);
    masm.store32(length, Address(output, JSString::offsetOfLength()));

    // Note: no post barrier is needed because the dependent string is either
    // allocated in the nursery or both strings are tenured (if nursery strings
    // are disabled for this zone).
    EmitInitDependentStringBase(masm, output, string, temp0, temp2,
                                /* needsPostBarrier = */ false);

    // Point the dependent string's chars at |string|'s buffer offset by
    // |begin| — no character copy is performed.
    auto initializeDependentString = [&](CharEncoding encoding) {
      uint32_t flags = JSString::INIT_DEPENDENT_FLAGS;
      if (encoding == CharEncoding::Latin1) {
        flags |= JSString::LATIN1_CHARS_BIT;
      }
      masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
      masm.loadNonInlineStringChars(string, temp0, encoding);
      masm.addToCharPtr(temp0, begin, encoding);
      masm.storeNonInlineStringChars(temp0, output);
    };

    Label isLatin1;
    masm.branchLatin1String(string, &isLatin1);
    initializeDependentString(CharEncoding::TwoByte);
    masm.jump(done);

    masm.bind(&isLatin1);
    initializeDependentString(CharEncoding::Latin1);
  }

  masm.bind(done);
}
  14395 
// Generate the per-Zone StringConcat stub used by CodeGenerator::emitConcat.
// Calling convention: lhs in CallTempReg0, rhs in CallTempReg1, result in
// CallTempReg5 (nullptr on failure, so the caller can take its VM fallback).
// Fast paths: empty operand -> other operand; short results -> inline string;
// otherwise a new rope. Returns nullptr (the JitCode*) on OOM.
JitCode* JitZone::generateStringConcatStub(JSContext* cx) {
  JitSpew(JitSpew_Codegen, "# Emitting StringConcat stub");

  TempAllocator temp(&cx->tempLifoAlloc());
  JitContext jcx(cx);
  StackMacroAssembler masm(cx, temp);
  AutoCreatedBy acb(masm, "JitZone::generateStringConcatStub");

  Register lhs = CallTempReg0;
  Register rhs = CallTempReg1;
  Register temp1 = CallTempReg2;
  Register temp2 = CallTempReg3;
  Register temp3 = CallTempReg4;
  Register output = CallTempReg5;

  Label failure;
#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  // If lhs is empty, return rhs.
  Label leftEmpty;
  masm.loadStringLength(lhs, temp1);
  masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);

  // If rhs is empty, return lhs.
  Label rightEmpty;
  masm.loadStringLength(rhs, temp2);
  masm.branchTest32(Assembler::Zero, temp2, temp2, &rightEmpty);

  // temp2 = combined length (lhs.length + rhs.length).
  masm.add32(temp1, temp2);

  // Check if we can use a JSInlineString. The result is a Latin1 string if
  // lhs and rhs are both Latin1, so we AND the flags.
  Label isInlineTwoByte, isInlineLatin1;
  masm.load32(Address(lhs, JSString::offsetOfFlags()), temp1);
  masm.and32(Address(rhs, JSString::offsetOfFlags()), temp1);

  Label isLatin1, notInline;
  masm.branchTest32(Assembler::NonZero, temp1,
                    Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
  {
    masm.branch32(Assembler::BelowOrEqual, temp2,
                  Imm32(JSFatInlineString::MAX_LENGTH_TWO_BYTE),
                  &isInlineTwoByte);
    masm.jump(&notInline);
  }
  masm.bind(&isLatin1);
  {
    masm.branch32(Assembler::BelowOrEqual, temp2,
                  Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), &isInlineLatin1);
  }
  masm.bind(&notInline);

  // Keep AND'ed flags in temp1.

  // Ensure result length <= JSString::MAX_LENGTH.
  masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);

  // Allocate a new rope, guaranteed to be in the nursery if initialStringHeap
  // == gc::Heap::Default. (As a result, no post barriers are needed below.)
  masm.newGCString(output, temp3, initialStringHeap, &failure);

  // Store rope length and flags. temp1 still holds the result of AND'ing the
  // lhs and rhs flags, so we just have to clear the other flags to get our rope
  // flags (Latin1 if both lhs and rhs are Latin1).
  static_assert(JSString::INIT_ROPE_FLAGS == 0,
                "Rope type flags must have no bits set");
  masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
  masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
  masm.store32(temp2, Address(output, JSString::offsetOfLength()));

  // Store left and right nodes.
  masm.storeRopeChildren(lhs, rhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&leftEmpty);
  masm.mov(rhs, output);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&rightEmpty);
  masm.mov(lhs, output);
  masm.pop(FramePointer);
  masm.ret();

  // Inline-string paths: copy both operands' chars into a fresh inline string.
  masm.bind(&isInlineTwoByte);
  ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
                     initialStringHeap, &failure, CharEncoding::TwoByte);
  masm.pop(FramePointer);
  masm.ret();

  masm.bind(&isInlineLatin1);
  ConcatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
                     initialStringHeap, &failure, CharEncoding::Latin1);
  masm.pop(FramePointer);
  masm.ret();

  // Failure: return nullptr so the JIT caller falls back to the VM.
  masm.bind(&failure);
  masm.movePtr(ImmPtr(nullptr), output);
  masm.pop(FramePointer);
  masm.ret();

  Linker linker(masm);
  JitCode* code = linker.newCode(cx, CodeKind::Other);

  CollectPerfSpewerJitCodeProfile(code, "StringConcatStub");
#ifdef MOZ_VTUNE
  vtune::MarkStub(code, "StringConcatStub");
#endif

  return code;
}
  14512 
// Generate the trampoline that lazily links Ion code: it builds a fake exit
// frame, calls LazyLinkTopActivation to produce the linked code's entry
// point, then tail-jumps to that entry (returned in ReturnReg).
void JitRuntime::generateLazyLinkStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateLazyLinkStub");

  lazyLinkStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp0 = regs.takeAny();
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();

  // temp0 = cx, temp1 = pointer to the LazyLink exit frame.
  masm.loadJSContext(temp0);
  masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::LazyLink);
  masm.moveStackPtrTo(temp1);

  using Fn = uint8_t* (*)(JSContext * cx, LazyLinkExitFrameLayout * frame);
  masm.setupUnalignedABICall(temp2);
  masm.passABIArg(temp0);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, LazyLinkTopActivation>(
      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Discard exit frame and restore frame pointer.
  masm.leaveExitFrame(0);
  masm.pop(FramePointer);

#ifdef JS_USE_LINK_REGISTER
  // Restore the return address such that the emitPrologue function of the
  // CodeGenerator can push it back on the stack with pushReturnAddress.
  masm.popReturnAddress();
#endif
  // Jump to the just-linked code's entry point.
  masm.jump(ReturnReg);
}
  14551 
// Generate the trampoline that forwards a JIT call into the C++ interpreter
// via InvokeFromInterpreterStub, then loads the return value the callee
// stored over |this| in the frame's argument vector.
void JitRuntime::generateInterpreterStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateInterpreterStub");

  interpreterStubOffset_ = startTrampolineCode(masm);

#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.Push(FramePointer);
  masm.moveStackPtrTo(FramePointer);

  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
  Register temp0 = regs.takeAny();
  Register temp1 = regs.takeAny();
  Register temp2 = regs.takeAny();

  // temp0 = cx, temp1 = pointer to the InterpreterStub exit frame.
  masm.loadJSContext(temp0);
  masm.enterFakeExitFrame(temp0, temp2, ExitFrameType::InterpreterStub);
  masm.moveStackPtrTo(temp1);

  using Fn = bool (*)(JSContext* cx, InterpreterStubExitFrameLayout* frame);
  masm.setupUnalignedABICall(temp2);
  masm.passABIArg(temp0);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, InvokeFromInterpreterStub>(
      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Propagate a pending exception on failure.
  masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

  // Discard exit frame and restore frame pointer.
  masm.leaveExitFrame(0);
  masm.pop(FramePointer);

  // InvokeFromInterpreterStub stores the return value in argv[0], where the
  // caller stored |this|. Subtract |sizeof(void*)| for the frame pointer we
  // just popped.
  masm.loadValue(Address(masm.getStackPointer(),
                         JitFrameLayout::offsetOfThis() - sizeof(void*)),
                 JSReturnOperand);
  masm.ret();
}
  14593 
// Generate a stub that converts the Value in R0 from a double to an int32
// Value when the double has an exact int32 representation; otherwise (or for
// non-double inputs) R0 is left unchanged.
void JitRuntime::generateDoubleToInt32ValueStub(MacroAssembler& masm) {
  AutoCreatedBy acb(masm, "JitRuntime::generateDoubleToInt32ValueStub");
  doubleToInt32ValueStubOffset_ = startTrampolineCode(masm);

  Label done;
  masm.branchTestDouble(Assembler::NotEqual, R0, &done);

  // convertDoubleToInt32 jumps to |done| (leaving R0 untouched) when the
  // conversion would be lossy; negative zero is deliberately allowed here.
  masm.unboxDouble(R0, FloatReg0);
  masm.convertDoubleToInt32(FloatReg0, R1.scratchReg(), &done,
                            /* negativeZeroCheck = */ false);
  masm.tagValue(JSVAL_TYPE_INT32, R1.scratchReg(), R0);

  masm.bind(&done);
  masm.abiret();
}
  14609 
  14610 void CodeGenerator::visitLinearizeString(LLinearizeString* lir) {
  14611  Register str = ToRegister(lir->string());
  14612  Register output = ToRegister(lir->output());
  14613 
  14614  using Fn = JSLinearString* (*)(JSContext*, JSString*);
  14615  auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
  14616      lir, ArgList(str), StoreRegisterTo(output));
  14617 
  14618  masm.branchIfRope(str, ool->entry());
  14619 
  14620  masm.movePtr(str, output);
  14621  masm.bind(ool->rejoin());
  14622 }
  14623 
  14624 void CodeGenerator::visitLinearizeForCharAccess(LLinearizeForCharAccess* lir) {
  14625  Register str = ToRegister(lir->string());
  14626  Register index = ToRegister(lir->index());
  14627  Register output = ToRegister(lir->output());
  14628 
  14629  using Fn = JSLinearString* (*)(JSContext*, JSString*);
  14630  auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
  14631      lir, ArgList(str), StoreRegisterTo(output));
  14632 
  14633  masm.branchIfNotCanLoadStringChar(str, index, output, ool->entry());
  14634 
  14635  masm.movePtr(str, output);
  14636  masm.bind(ool->rejoin());
  14637 }
  14638 
  14639 void CodeGenerator::visitLinearizeForCodePointAccess(
  14640    LLinearizeForCodePointAccess* lir) {
  14641  Register str = ToRegister(lir->string());
  14642  Register index = ToRegister(lir->index());
  14643  Register output = ToRegister(lir->output());
  14644  Register temp = ToRegister(lir->temp0());
  14645 
  14646  using Fn = JSLinearString* (*)(JSContext*, JSString*);
  14647  auto* ool = oolCallVM<Fn, jit::LinearizeForCharAccess>(
  14648      lir, ArgList(str), StoreRegisterTo(output));
  14649 
  14650  masm.branchIfNotCanLoadStringCodePoint(str, index, output, temp,
  14651                                         ool->entry());
  14652 
  14653  masm.movePtr(str, output);
  14654  masm.bind(ool->rejoin());
  14655 }
  14656 
  14657 void CodeGenerator::visitToRelativeStringIndex(LToRelativeStringIndex* lir) {
  14658  Register index = ToRegister(lir->index());
  14659  Register length = ToRegister(lir->length());
  14660  Register output = ToRegister(lir->output());
  14661 
  14662  masm.move32(Imm32(0), output);
  14663  masm.cmp32Move32(Assembler::LessThan, index, Imm32(0), length, output);
  14664  masm.add32(index, output);
  14665 }
  14666 
  14667 void CodeGenerator::visitCharCodeAt(LCharCodeAt* lir) {
  14668  Register str = ToRegister(lir->string());
  14669  Register output = ToRegister(lir->output());
  14670  Register temp0 = ToRegister(lir->temp0());
  14671  Register temp1 = ToRegister(lir->temp1());
  14672 
  14673  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
  14674 
  14675  if (lir->index()->isBogus()) {
  14676    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
  14677                                               StoreRegisterTo(output));
  14678    masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
  14679    masm.bind(ool->rejoin());
  14680  } else {
  14681    Register index = ToRegister(lir->index());
  14682 
  14683    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
  14684                                               StoreRegisterTo(output));
  14685    masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
  14686    masm.bind(ool->rejoin());
  14687  }
  14688 }
  14689 
  14690 void CodeGenerator::visitCharCodeAtOrNegative(LCharCodeAtOrNegative* lir) {
  14691  Register str = ToRegister(lir->string());
  14692  Register output = ToRegister(lir->output());
  14693  Register temp0 = ToRegister(lir->temp0());
  14694  Register temp1 = ToRegister(lir->temp1());
  14695 
  14696  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
  14697 
  14698  // Return -1 for out-of-bounds access.
  14699  masm.move32(Imm32(-1), output);
  14700 
  14701  if (lir->index()->isBogus()) {
  14702    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, Imm32(0)),
  14703                                               StoreRegisterTo(output));
  14704 
  14705    masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
  14706                  Imm32(0), ool->rejoin());
  14707    masm.loadStringChar(str, 0, output, temp0, temp1, ool->entry());
  14708    masm.bind(ool->rejoin());
  14709  } else {
  14710    Register index = ToRegister(lir->index());
  14711 
  14712    auto* ool = oolCallVM<Fn, jit::CharCodeAt>(lir, ArgList(str, index),
  14713                                               StoreRegisterTo(output));
  14714 
  14715    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
  14716                              temp0, ool->rejoin());
  14717    masm.loadStringChar(str, index, output, temp0, temp1, ool->entry());
  14718    masm.bind(ool->rejoin());
  14719  }
  14720 }
  14721 
  14722 void CodeGenerator::visitCodePointAt(LCodePointAt* lir) {
  14723  Register str = ToRegister(lir->string());
  14724  Register index = ToRegister(lir->index());
  14725  Register output = ToRegister(lir->output());
  14726  Register temp0 = ToRegister(lir->temp0());
  14727  Register temp1 = ToRegister(lir->temp1());
  14728 
  14729  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
  14730  auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
  14731                                              StoreRegisterTo(output));
  14732 
  14733  masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
  14734  masm.bind(ool->rejoin());
  14735 }
  14736 
  14737 void CodeGenerator::visitCodePointAtOrNegative(LCodePointAtOrNegative* lir) {
  14738  Register str = ToRegister(lir->string());
  14739  Register index = ToRegister(lir->index());
  14740  Register output = ToRegister(lir->output());
  14741  Register temp0 = ToRegister(lir->temp0());
  14742  Register temp1 = ToRegister(lir->temp1());
  14743 
  14744  using Fn = bool (*)(JSContext*, HandleString, int32_t, uint32_t*);
  14745  auto* ool = oolCallVM<Fn, jit::CodePointAt>(lir, ArgList(str, index),
  14746                                              StoreRegisterTo(output));
  14747 
  14748  // Return -1 for out-of-bounds access.
  14749  masm.move32(Imm32(-1), output);
  14750 
  14751  masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
  14752                            temp0, ool->rejoin());
  14753  masm.loadStringCodePoint(str, index, output, temp0, temp1, ool->entry());
  14754  masm.bind(ool->rejoin());
  14755 }
  14756 
  14757 void CodeGenerator::visitNegativeToNaN(LNegativeToNaN* lir) {
  14758  Register input = ToRegister(lir->input());
  14759  ValueOperand output = ToOutValue(lir);
  14760 
  14761  masm.tagValue(JSVAL_TYPE_INT32, input, output);
  14762 
  14763  Label done;
  14764  masm.branchTest32(Assembler::NotSigned, input, input, &done);
  14765  masm.moveValue(JS::NaNValue(), output);
  14766  masm.bind(&done);
  14767 }
  14768 
  14769 void CodeGenerator::visitNegativeToUndefined(LNegativeToUndefined* lir) {
  14770  Register input = ToRegister(lir->input());
  14771  ValueOperand output = ToOutValue(lir);
  14772 
  14773  masm.tagValue(JSVAL_TYPE_INT32, input, output);
  14774 
  14775  Label done;
  14776  masm.branchTest32(Assembler::NotSigned, input, input, &done);
  14777  masm.moveValue(JS::UndefinedValue(), output);
  14778  masm.bind(&done);
  14779 }
  14780 
  14781 void CodeGenerator::visitFromCharCode(LFromCharCode* lir) {
  14782  Register code = ToRegister(lir->code());
  14783  Register output = ToRegister(lir->output());
  14784 
  14785  using Fn = JSLinearString* (*)(JSContext*, int32_t);
  14786  auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
  14787                                                    StoreRegisterTo(output));
  14788 
  14789  // OOL path if code >= UNIT_STATIC_LIMIT.
  14790  masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
  14791                          ool->entry());
  14792 
  14793  masm.bind(ool->rejoin());
  14794 }
  14795 
  14796 void CodeGenerator::visitFromCharCodeEmptyIfNegative(
  14797    LFromCharCodeEmptyIfNegative* lir) {
  14798  Register code = ToRegister(lir->code());
  14799  Register output = ToRegister(lir->output());
  14800 
  14801  using Fn = JSLinearString* (*)(JSContext*, int32_t);
  14802  auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
  14803                                                    StoreRegisterTo(output));
  14804 
  14805  // Return the empty string for negative inputs.
  14806  const JSAtomState& names = gen->runtime->names();
  14807  masm.movePtr(ImmGCPtr(names.empty_), output);
  14808  masm.branchTest32(Assembler::Signed, code, code, ool->rejoin());
  14809 
  14810  // OOL path if code >= UNIT_STATIC_LIMIT.
  14811  masm.lookupStaticString(code, output, gen->runtime->staticStrings(),
  14812                          ool->entry());
  14813 
  14814  masm.bind(ool->rejoin());
  14815 }
  14816 
void CodeGenerator::visitFromCharCodeUndefinedIfNegative(
   LFromCharCodeUndefinedIfNegative* lir) {
 // Like the plain from-char-code path, but yields the Value |undefined|
 // instead of a string when the input char code is negative.
 Register code = ToRegister(lir->code());
 ValueOperand output = ToOutValue(lir);
 // Reuse the output Value's scratch register to hold the string pointer
 // until it is boxed below.
 Register temp = output.scratchReg();

 using Fn = JSLinearString* (*)(JSContext*, int32_t);
 auto* ool = oolCallVM<Fn, js::StringFromCharCode>(lir, ArgList(code),
                                                   StoreRegisterTo(temp));

 // Return |undefined| for negative inputs.
 Label done;
 masm.moveValue(UndefinedValue(), output);
 masm.branchTest32(Assembler::Signed, code, code, &done);

 // OOL path if code >= UNIT_STATIC_LIMIT.
 masm.lookupStaticString(code, temp, gen->runtime->staticStrings(),
                         ool->entry());

 masm.bind(ool->rejoin());
 // Box the string pointer (from either the fast or the OOL path) as a Value.
 masm.tagValue(JSVAL_TYPE_STRING, temp, output);

 masm.bind(&done);
}
  14841 
void CodeGenerator::visitFromCodePoint(LFromCodePoint* lir) {
 // String.fromCodePoint: BMP code points <= MAX_LATIN1_CHAR come from the
 // static-strings table; anything else is stored into a freshly allocated
 // inline string (one unit for BMP, a surrogate pair for supplementary
 // code points).
 Register codePoint = ToRegister(lir->codePoint());
 Register output = ToRegister(lir->output());
 Register temp0 = ToRegister(lir->temp0());
 Register temp1 = ToRegister(lir->temp1());
 LSnapshot* snapshot = lir->snapshot();

 // The OOL path is only taken when we can't allocate the inline string.
 using Fn = JSLinearString* (*)(JSContext*, char32_t);
 auto* ool = oolCallVM<Fn, js::StringFromCodePoint>(lir, ArgList(codePoint),
                                                    StoreRegisterTo(output));

 Label isTwoByte;
 Label* done = ool->rejoin();

 static_assert(
     StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
     "Latin-1 strings can be loaded from static strings");

 {
   // Fast path: static string hit; falls through to |isTwoByte| on miss.
   masm.lookupStaticString(codePoint, output, gen->runtime->staticStrings(),
                           &isTwoByte);
   masm.jump(done);
 }
 masm.bind(&isTwoByte);
 {
   // Use a bailout if the input is not a valid code point, because
   // MFromCodePoint is movable and it'd be observable when a moved
   // fromCodePoint throws an exception before its actual call site.
   bailoutCmp32(Assembler::Above, codePoint, Imm32(unicode::NonBMPMax),
                snapshot);

   // Allocate a JSThinInlineString.
   {
     static_assert(JSThinInlineString::MAX_LENGTH_TWO_BYTE >= 2,
                   "JSThinInlineString can hold a supplementary code point");

     uint32_t flags = JSString::INIT_THIN_INLINE_FLAGS;
     masm.newGCString(output, temp0, gen->initialStringHeap(), ool->entry());
     masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
   }

   Label isSupplementary;
   masm.branch32(Assembler::AboveOrEqual, codePoint, Imm32(unicode::NonBMPMin),
                 &isSupplementary);
   {
     // BMP code point: a single two-byte unit.

     // Store length.
     masm.store32(Imm32(1), Address(output, JSString::offsetOfLength()));

     // Load chars pointer in temp0.
     masm.loadInlineStringCharsForStore(output, temp0);

     masm.store16(codePoint, Address(temp0, 0));

     masm.jump(done);
   }
   masm.bind(&isSupplementary);
   {
     // Supplementary code point: encode as a surrogate pair.

     // Store length.
     masm.store32(Imm32(2), Address(output, JSString::offsetOfLength()));

     // Load chars pointer in temp0.
     masm.loadInlineStringCharsForStore(output, temp0);

     // Inlined unicode::LeadSurrogate(uint32_t).
     masm.rshift32(Imm32(10), codePoint, temp1);
     masm.add32(Imm32(unicode::LeadSurrogateMin - (unicode::NonBMPMin >> 10)),
                temp1);

     masm.store16(temp1, Address(temp0, 0));

     // Inlined unicode::TrailSurrogate(uint32_t).
     masm.and32(Imm32(0x3FF), codePoint, temp1);
     masm.or32(Imm32(unicode::TrailSurrogateMin), temp1);

     masm.store16(temp1, Address(temp0, sizeof(char16_t)));
   }
 }

 masm.bind(done);
}
  14923 
  14924 void CodeGenerator::visitStringIncludes(LStringIncludes* lir) {
  14925  pushArg(ToRegister(lir->searchString()));
  14926  pushArg(ToRegister(lir->string()));
  14927 
  14928  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  14929  callVM<Fn, js::StringIncludes>(lir);
  14930 }
  14931 
  14932 template <typename LIns>
  14933 static void CallStringMatch(MacroAssembler& masm, LIns* lir, OutOfLineCode* ool,
  14934                            LiveRegisterSet volatileRegs) {
  14935  Register string = ToRegister(lir->string());
  14936  Register output = ToRegister(lir->output());
  14937  Register tempLength = ToRegister(lir->temp0());
  14938  Register tempChars = ToRegister(lir->temp1());
  14939  Register maybeTempPat = ToTempRegisterOrInvalid(lir->temp2());
  14940 
  14941  const JSOffThreadAtom* searchString = lir->searchString();
  14942  size_t length = searchString->length();
  14943  MOZ_ASSERT(length == 1 || length == 2);
  14944 
  14945  // The additional temp register is only needed when searching for two
  14946  // pattern characters.
  14947  MOZ_ASSERT_IF(length == 2, maybeTempPat != InvalidReg);
  14948 
  14949  if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
  14950    masm.move32(Imm32(0), output);
  14951  } else {
  14952    masm.move32(Imm32(-1), output);
  14953  }
  14954 
  14955  masm.loadStringLength(string, tempLength);
  14956 
  14957  // Can't be a substring when the string is smaller than the search string.
  14958  Label done;
  14959  masm.branch32(Assembler::Below, tempLength, Imm32(length), ool->rejoin());
  14960 
  14961  bool searchStringIsPureTwoByte = false;
  14962  if (searchString->hasTwoByteChars()) {
  14963    JS::AutoCheckCannotGC nogc;
  14964    searchStringIsPureTwoByte =
  14965        !mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc));
  14966  }
  14967 
  14968  // Pure two-byte strings can't occur in a Latin-1 string.
  14969  if (searchStringIsPureTwoByte) {
  14970    masm.branchLatin1String(string, ool->rejoin());
  14971  }
  14972 
  14973  // Slow path when we need to linearize the string.
  14974  masm.branchIfRope(string, ool->entry());
  14975 
  14976  Label restoreVolatile;
  14977 
  14978  auto callMatcher = [&](CharEncoding encoding) {
  14979    masm.loadStringChars(string, tempChars, encoding);
  14980 
  14981    LiveGeneralRegisterSet liveRegs;
  14982    if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
  14983      // Save |tempChars| to compute the result index.
  14984      liveRegs.add(tempChars);
  14985 
  14986 #ifdef DEBUG
  14987      // Save |tempLength| in debug-mode for assertions.
  14988      liveRegs.add(tempLength);
  14989 #endif
  14990 
  14991      // Exclude non-volatile registers.
  14992      liveRegs.set() = GeneralRegisterSet::Intersect(
  14993          liveRegs.set(), GeneralRegisterSet::Volatile());
  14994 
  14995      masm.PushRegsInMask(liveRegs);
  14996    }
  14997 
  14998    if (length == 1) {
  14999      char16_t pat = searchString->latin1OrTwoByteChar(0);
  15000      MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
  15001                    pat <= JSString::MAX_LATIN1_CHAR);
  15002 
  15003      masm.move32(Imm32(pat), output);
  15004 
  15005      masm.setupAlignedABICall();
  15006      masm.passABIArg(tempChars);
  15007      masm.passABIArg(output);
  15008      masm.passABIArg(tempLength);
  15009      if (encoding == CharEncoding::Latin1) {
  15010        using Fn = const char* (*)(const char*, char, size_t);
  15011        masm.callWithABI<Fn, mozilla::SIMD::memchr8>(
  15012            ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
  15013      } else {
  15014        using Fn = const char16_t* (*)(const char16_t*, char16_t, size_t);
  15015        masm.callWithABI<Fn, mozilla::SIMD::memchr16>(
  15016            ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
  15017      }
  15018    } else {
  15019      char16_t pat0 = searchString->latin1OrTwoByteChar(0);
  15020      MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
  15021                    pat0 <= JSString::MAX_LATIN1_CHAR);
  15022 
  15023      char16_t pat1 = searchString->latin1OrTwoByteChar(1);
  15024      MOZ_ASSERT_IF(encoding == CharEncoding::Latin1,
  15025                    pat1 <= JSString::MAX_LATIN1_CHAR);
  15026 
  15027      masm.move32(Imm32(pat0), output);
  15028      masm.move32(Imm32(pat1), maybeTempPat);
  15029 
  15030      masm.setupAlignedABICall();
  15031      masm.passABIArg(tempChars);
  15032      masm.passABIArg(output);
  15033      masm.passABIArg(maybeTempPat);
  15034      masm.passABIArg(tempLength);
  15035      if (encoding == CharEncoding::Latin1) {
  15036        using Fn = const char* (*)(const char*, char, char, size_t);
  15037        masm.callWithABI<Fn, mozilla::SIMD::memchr2x8>(
  15038            ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
  15039      } else {
  15040        using Fn =
  15041            const char16_t* (*)(const char16_t*, char16_t, char16_t, size_t);
  15042        masm.callWithABI<Fn, mozilla::SIMD::memchr2x16>(
  15043            ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
  15044      }
  15045    }
  15046 
  15047    masm.storeCallPointerResult(output);
  15048 
  15049    // Convert to string index for `indexOf`.
  15050    if constexpr (std::is_same_v<LIns, LStringIndexOfSIMD>) {
  15051      // Restore |tempChars|. (And in debug mode |tempLength|.)
  15052      masm.PopRegsInMask(liveRegs);
  15053 
  15054      Label found;
  15055      masm.branchPtr(Assembler::NotEqual, output, ImmPtr(nullptr), &found);
  15056      {
  15057        masm.move32(Imm32(-1), output);
  15058        masm.jump(&restoreVolatile);
  15059      }
  15060      masm.bind(&found);
  15061 
  15062 #ifdef DEBUG
  15063      // Check lower bound.
  15064      Label lower;
  15065      masm.branchPtr(Assembler::AboveOrEqual, output, tempChars, &lower);
  15066      masm.assumeUnreachable("result pointer below string chars");
  15067      masm.bind(&lower);
  15068 
  15069      // Compute the end position of the characters.
  15070      auto scale = encoding == CharEncoding::Latin1 ? TimesOne : TimesTwo;
  15071      masm.computeEffectiveAddress(BaseIndex(tempChars, tempLength, scale),
  15072                                   tempLength);
  15073 
  15074      // Check upper bound.
  15075      Label upper;
  15076      masm.branchPtr(Assembler::Below, output, tempLength, &upper);
  15077      masm.assumeUnreachable("result pointer above string chars");
  15078      masm.bind(&upper);
  15079 #endif
  15080 
  15081      masm.subPtr(tempChars, output);
  15082 
  15083      if (encoding == CharEncoding::TwoByte) {
  15084        masm.rshiftPtr(Imm32(1), output);
  15085      }
  15086    }
  15087  };
  15088 
  15089  volatileRegs.takeUnchecked(output);
  15090  volatileRegs.takeUnchecked(tempLength);
  15091  volatileRegs.takeUnchecked(tempChars);
  15092  if (maybeTempPat != InvalidReg) {
  15093    volatileRegs.takeUnchecked(maybeTempPat);
  15094  }
  15095  masm.PushRegsInMask(volatileRegs);
  15096 
  15097  // Handle the case when the input is a Latin-1 string.
  15098  if (!searchStringIsPureTwoByte) {
  15099    Label twoByte;
  15100    masm.branchTwoByteString(string, &twoByte);
  15101    {
  15102      callMatcher(CharEncoding::Latin1);
  15103      masm.jump(&restoreVolatile);
  15104    }
  15105    masm.bind(&twoByte);
  15106  }
  15107 
  15108  // Handle the case when the input is a two-byte string.
  15109  callMatcher(CharEncoding::TwoByte);
  15110 
  15111  masm.bind(&restoreVolatile);
  15112  masm.PopRegsInMask(volatileRegs);
  15113 
  15114  // Convert to bool for `includes`.
  15115  if constexpr (std::is_same_v<LIns, LStringIncludesSIMD>) {
  15116    masm.cmpPtrSet(Assembler::NotEqual, output, ImmPtr(nullptr), output);
  15117  }
  15118 
  15119  masm.bind(ool->rejoin());
  15120 }
  15121 
  15122 void CodeGenerator::visitStringIncludesSIMD(LStringIncludesSIMD* lir) {
  15123  Register string = ToRegister(lir->string());
  15124  Register output = ToRegister(lir->output());
  15125  const JSOffThreadAtom* searchString = lir->searchString();
  15126 
  15127  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  15128  auto* ool = oolCallVM<Fn, js::StringIncludes>(
  15129      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
  15130 
  15131  CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
  15132 }
  15133 
  15134 void CodeGenerator::visitStringIndexOf(LStringIndexOf* lir) {
  15135  pushArg(ToRegister(lir->searchString()));
  15136  pushArg(ToRegister(lir->string()));
  15137 
  15138  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  15139  callVM<Fn, js::StringIndexOf>(lir);
  15140 }
  15141 
  15142 void CodeGenerator::visitStringIndexOfSIMD(LStringIndexOfSIMD* lir) {
  15143  Register string = ToRegister(lir->string());
  15144  Register output = ToRegister(lir->output());
  15145  const JSOffThreadAtom* searchString = lir->searchString();
  15146 
  15147  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  15148  auto* ool = oolCallVM<Fn, js::StringIndexOf>(
  15149      lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));
  15150 
  15151  CallStringMatch(masm, lir, ool, liveVolatileRegs(lir));
  15152 }
  15153 
  15154 void CodeGenerator::visitStringLastIndexOf(LStringLastIndexOf* lir) {
  15155  pushArg(ToRegister(lir->searchString()));
  15156  pushArg(ToRegister(lir->string()));
  15157 
  15158  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  15159  callVM<Fn, js::StringLastIndexOf>(lir);
  15160 }
  15161 
  15162 void CodeGenerator::visitStringStartsWith(LStringStartsWith* lir) {
  15163  pushArg(ToRegister(lir->searchString()));
  15164  pushArg(ToRegister(lir->string()));
  15165 
  15166  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  15167  callVM<Fn, js::StringStartsWith>(lir);
  15168 }
  15169 
void CodeGenerator::visitStringStartsWithInline(LStringStartsWithInline* lir) {
 // Inline path for String.prototype.startsWith with a constant search string.
 // Unwinds rope left-children so a prefix check can often run without
 // linearizing the whole input; falls back to the VM otherwise.
 Register string = ToRegister(lir->string());
 Register output = ToRegister(lir->output());
 Register temp = ToRegister(lir->temp0());

 const JSOffThreadAtom* searchString = lir->searchString();

 size_t length = searchString->length();
 MOZ_ASSERT(length > 0);

 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
 auto* ool = oolCallVM<Fn, js::StringStartsWith>(
     lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

 // Preload |false|; early exits below keep this result.
 masm.move32(Imm32(0), output);

 // Can't be a prefix when the string is smaller than the search string.
 masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
               Imm32(length), ool->rejoin());

 // Unwind ropes at the start if possible.
 Label compare;
 masm.movePtr(string, temp);
 masm.branchIfNotRope(temp, &compare);

 Label unwindRope;
 masm.bind(&unwindRope);
 masm.loadRopeLeftChild(temp, output);
 masm.movePtr(output, temp);

 // If the left child is smaller than the search string, jump into the VM to
 // linearize the string.
 masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
               Imm32(length), ool->entry());

 // Otherwise keep unwinding ropes.
 masm.branchIfRope(temp, &unwindRope);

 masm.bind(&compare);

 // If operands point to the same instance, it's trivially a prefix.
 Label notPointerEqual;
 masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                &notPointerEqual);
 masm.move32(Imm32(1), output);
 masm.jump(ool->rejoin());
 masm.bind(&notPointerEqual);

 if (searchString->hasTwoByteChars()) {
   // Pure two-byte strings can't be a prefix of Latin-1 strings.
   JS::AutoCheckCannotGC nogc;
   if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
     Label compareChars;
     masm.branchTwoByteString(temp, &compareChars);
     masm.move32(Imm32(0), output);
     masm.jump(ool->rejoin());
     masm.bind(&compareChars);
   }
 }

 // Load the input string's characters.
 // |output| is repurposed as the chars pointer here; it is overwritten
 // with the final bool by the comparison below.
 Register stringChars = output;
 masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());

 // Start comparing character by character.
 masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);

 masm.bind(ool->rejoin());
}
  15239 
  15240 void CodeGenerator::visitStringEndsWith(LStringEndsWith* lir) {
  15241  pushArg(ToRegister(lir->searchString()));
  15242  pushArg(ToRegister(lir->string()));
  15243 
  15244  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  15245  callVM<Fn, js::StringEndsWith>(lir);
  15246 }
  15247 
void CodeGenerator::visitStringEndsWithInline(LStringEndsWithInline* lir) {
 // Inline path for String.prototype.endsWith with a constant search string.
 // Mirrors visitStringStartsWithInline, but unwinds rope right-children and
 // offsets the chars pointer to the suffix before comparing.
 Register string = ToRegister(lir->string());
 Register output = ToRegister(lir->output());
 Register temp = ToRegister(lir->temp0());

 const JSOffThreadAtom* searchString = lir->searchString();

 size_t length = searchString->length();
 MOZ_ASSERT(length > 0);

 using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
 auto* ool = oolCallVM<Fn, js::StringEndsWith>(
     lir, ArgList(string, ImmGCPtr(searchString)), StoreRegisterTo(output));

 // Preload |false|; early exits below keep this result.
 masm.move32(Imm32(0), output);

 // Can't be a suffix when the string is smaller than the search string.
 masm.branch32(Assembler::Below, Address(string, JSString::offsetOfLength()),
               Imm32(length), ool->rejoin());

 // Unwind ropes at the end if possible.
 Label compare;
 masm.movePtr(string, temp);
 masm.branchIfNotRope(temp, &compare);

 Label unwindRope;
 masm.bind(&unwindRope);
 masm.loadRopeRightChild(temp, output);
 masm.movePtr(output, temp);

 // If the right child is smaller than the search string, jump into the VM to
 // linearize the string.
 masm.branch32(Assembler::Below, Address(temp, JSString::offsetOfLength()),
               Imm32(length), ool->entry());

 // Otherwise keep unwinding ropes.
 masm.branchIfRope(temp, &unwindRope);

 masm.bind(&compare);

 // If operands point to the same instance, it's trivially a suffix.
 Label notPointerEqual;
 masm.branchPtr(Assembler::NotEqual, temp, ImmGCPtr(searchString),
                &notPointerEqual);
 masm.move32(Imm32(1), output);
 masm.jump(ool->rejoin());
 masm.bind(&notPointerEqual);

 // Encoding of the (constant) search string decides how the chars pointer is
 // advanced below.
 CharEncoding encoding = searchString->hasLatin1Chars()
                             ? CharEncoding::Latin1
                             : CharEncoding::TwoByte;
 if (encoding == CharEncoding::TwoByte) {
   // Pure two-byte strings can't be a suffix of Latin-1 strings.
   JS::AutoCheckCannotGC nogc;
   if (!mozilla::IsUtf16Latin1(searchString->twoByteRange(nogc))) {
     Label compareChars;
     masm.branchTwoByteString(temp, &compareChars);
     masm.move32(Imm32(0), output);
     masm.jump(ool->rejoin());
     masm.bind(&compareChars);
   }
 }

 // Load the input string's characters.
 // |output| is repurposed as the chars pointer here; it is overwritten
 // with the final bool by the comparison below.
 Register stringChars = output;
 masm.loadStringCharsForCompare(temp, searchString, stringChars, ool->entry());

 // Move string-char pointer to the suffix string.
 masm.loadStringLength(temp, temp);
 masm.sub32(Imm32(length), temp);
 masm.addToCharPtr(stringChars, temp, encoding);

 // Start comparing character by character.
 masm.compareStringChars(JSOp::Eq, stringChars, searchString, output);

 masm.bind(ool->rejoin());
}
  15325 
  15326 void CodeGenerator::visitStringToLowerCase(LStringToLowerCase* lir) {
  15327  Register string = ToRegister(lir->string());
  15328  Register output = ToRegister(lir->output());
  15329  Register temp0 = ToRegister(lir->temp0());
  15330  Register temp1 = ToRegister(lir->temp1());
  15331  Register temp2 = ToRegister(lir->temp2());
  15332 
  15333  // On x86 there are not enough registers. In that case reuse the string
  15334  // register as a temporary.
  15335  Register temp3 =
  15336      lir->temp3()->isBogusTemp() ? string : ToRegister(lir->temp3());
  15337  Register temp4 = ToRegister(lir->temp4());
  15338 
  15339  using Fn = JSLinearString* (*)(JSContext*, JSString*);
  15340  OutOfLineCode* ool = oolCallVM<Fn, js::StringToLowerCase>(
  15341      lir, ArgList(string), StoreRegisterTo(output));
  15342 
  15343  // Take the slow path if the string isn't a linear Latin-1 string.
  15344  Imm32 linearLatin1Bits(JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT);
  15345  Register flags = temp0;
  15346  masm.load32(Address(string, JSString::offsetOfFlags()), flags);
  15347  masm.and32(linearLatin1Bits, flags);
  15348  masm.branch32(Assembler::NotEqual, flags, linearLatin1Bits, ool->entry());
  15349 
  15350  Register length = temp0;
  15351  masm.loadStringLength(string, length);
  15352 
  15353  // Return the input if it's the empty string.
  15354  Label notEmptyString;
  15355  masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmptyString);
  15356  {
  15357    masm.movePtr(string, output);
  15358    masm.jump(ool->rejoin());
  15359  }
  15360  masm.bind(&notEmptyString);
  15361 
  15362  Register inputChars = temp1;
  15363  masm.loadStringChars(string, inputChars, CharEncoding::Latin1);
  15364 
  15365  Register toLowerCaseTable = temp2;
  15366  masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), toLowerCaseTable);
  15367 
  15368  // Single element strings can be directly retrieved from static strings cache.
  15369  Label notSingleElementString;
  15370  masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleElementString);
  15371  {
  15372    Register current = temp4;
  15373 
  15374    masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
  15375    masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
  15376                         current);
  15377    masm.lookupStaticString(current, output, gen->runtime->staticStrings());
  15378 
  15379    masm.jump(ool->rejoin());
  15380  }
  15381  masm.bind(&notSingleElementString);
  15382 
  15383  // Use the OOL-path when the string is too long. This prevents scanning long
  15384  // strings which have upper case characters only near the end a second time in
  15385  // the VM.
  15386  constexpr int32_t MaxInlineLength = 64;
  15387  masm.branch32(Assembler::Above, length, Imm32(MaxInlineLength), ool->entry());
  15388 
  15389  {
  15390    // Check if there are any characters which need to be converted.
  15391    //
  15392    // This extra loop gives a small performance improvement for strings which
  15393    // are already lower cased and lets us avoid calling into the runtime for
  15394    // non-inline, all lower case strings. But more importantly it avoids
  15395    // repeated inline allocation failures:
  15396    // |AllocateThinOrFatInlineString| below takes the OOL-path and calls the
  15397    // |js::StringToLowerCase| runtime function when the result string can't be
  15398    // allocated inline. And |js::StringToLowerCase| directly returns the input
  15399    // string when no characters need to be converted. That means it won't
  15400    // trigger GC to clear up the free nursery space, so the next toLowerCase()
  15401    // call will again fail to inline allocate the result string.
  15402    Label hasUpper;
  15403    {
  15404      Register checkInputChars = output;
  15405      masm.movePtr(inputChars, checkInputChars);
  15406 
  15407      Register current = temp4;
  15408 
  15409      Label start;
  15410      masm.bind(&start);
  15411      masm.loadChar(Address(checkInputChars, 0), current, CharEncoding::Latin1);
  15412      masm.branch8(Assembler::NotEqual,
  15413                   BaseIndex(toLowerCaseTable, current, TimesOne), current,
  15414                   &hasUpper);
  15415      masm.addPtr(Imm32(sizeof(Latin1Char)), checkInputChars);
  15416      masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
  15417 
  15418      // Input is already in lower case.
  15419      masm.movePtr(string, output);
  15420      masm.jump(ool->rejoin());
  15421    }
  15422    masm.bind(&hasUpper);
  15423 
  15424    // |length| was clobbered above, reload.
  15425    masm.loadStringLength(string, length);
  15426 
  15427    // Call into the runtime when we can't create an inline string.
  15428    masm.branch32(Assembler::Above, length,
  15429                  Imm32(JSFatInlineString::MAX_LENGTH_LATIN1), ool->entry());
  15430 
  15431    AllocateThinOrFatInlineString(masm, output, length, temp4,
  15432                                  initialStringHeap(), ool->entry(),
  15433                                  CharEncoding::Latin1);
  15434 
  15435    if (temp3 == string) {
  15436      masm.push(string);
  15437    }
  15438 
  15439    Register outputChars = temp3;
  15440    masm.loadInlineStringCharsForStore(output, outputChars);
  15441 
  15442    {
  15443      Register current = temp4;
  15444 
  15445      Label start;
  15446      masm.bind(&start);
  15447      masm.loadChar(Address(inputChars, 0), current, CharEncoding::Latin1);
  15448      masm.load8ZeroExtend(BaseIndex(toLowerCaseTable, current, TimesOne),
  15449                           current);
  15450      masm.storeChar(current, Address(outputChars, 0), CharEncoding::Latin1);
  15451      masm.addPtr(Imm32(sizeof(Latin1Char)), inputChars);
  15452      masm.addPtr(Imm32(sizeof(Latin1Char)), outputChars);
  15453      masm.branchSub32(Assembler::NonZero, Imm32(1), length, &start);
  15454    }
  15455 
  15456    if (temp3 == string) {
  15457      masm.pop(string);
  15458    }
  15459  }
  15460 
  15461  masm.bind(ool->rejoin());
  15462 }
  15463 
  15464 void CodeGenerator::visitStringToUpperCase(LStringToUpperCase* lir) {
  15465  pushArg(ToRegister(lir->string()));
  15466 
  15467  using Fn = JSLinearString* (*)(JSContext*, JSString*);
  15468  callVM<Fn, js::StringToUpperCase>(lir);
  15469 }
  15470 
  15471 void CodeGenerator::visitCharCodeToLowerCase(LCharCodeToLowerCase* lir) {
  15472  Register code = ToRegister(lir->code());
  15473  Register output = ToRegister(lir->output());
  15474  Register temp = ToRegister(lir->temp0());
  15475 
  15476  using Fn = JSString* (*)(JSContext*, int32_t);
  15477  auto* ool = oolCallVM<Fn, jit::CharCodeToLowerCase>(lir, ArgList(code),
  15478                                                      StoreRegisterTo(output));
  15479 
  15480  constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;
  15481 
  15482  // OOL path if code >= NonLatin1Min.
  15483  masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());
  15484 
  15485  // Convert to lower case.
  15486  masm.movePtr(ImmPtr(unicode::latin1ToLowerCaseTable), temp);
  15487  masm.load8ZeroExtend(BaseIndex(temp, code, TimesOne), temp);
  15488 
  15489  // Load static string for lower case character.
  15490  masm.lookupStaticString(temp, output, gen->runtime->staticStrings());
  15491 
  15492  masm.bind(ool->rejoin());
  15493 }
  15494 
void CodeGenerator::visitCharCodeToUpperCase(LCharCodeToUpperCase* lir) {
  // Convert a single char code to its upper-case static string. Latin-1
  // inputs (minus three special cases) are handled inline by walking the
  // two-level unicode::CharacterInfo index tables; everything else goes to
  // the OOL VM call.
  Register code = ToRegister(lir->code());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  using Fn = JSString* (*)(JSContext*, int32_t);
  auto* ool = oolCallVM<Fn, jit::CharCodeToUpperCase>(lir, ArgList(code),
                                                      StoreRegisterTo(output));

  constexpr char16_t NonLatin1Min = char16_t(JSString::MAX_LATIN1_CHAR) + 1;

  // OOL path if code >= NonLatin1Min.
  masm.boundsCheck32PowerOfTwo(code, NonLatin1Min, ool->entry());

  // Most one-element Latin-1 strings can be directly retrieved from the
  // static strings cache, except the following three characters:
  //
  // 1. ToUpper(U+00B5) = U+039C
  // 2. ToUpper(U+00FF) = U+0178
  // 3. ToUpper(U+00DF) = U+0053 U+0053
  //
  // The first two upper-case outside Latin-1 and the third upper-cases to a
  // two-element string, so all three are diverted to the OOL path.
  masm.branch32(Assembler::Equal, code, Imm32(unicode::MICRO_SIGN),
                ool->entry());
  masm.branch32(Assembler::Equal, code,
                Imm32(unicode::LATIN_SMALL_LETTER_Y_WITH_DIAERESIS),
                ool->entry());
  masm.branch32(Assembler::Equal, code,
                Imm32(unicode::LATIN_SMALL_LETTER_SHARP_S), ool->entry());

  // Inline unicode::ToUpperCase (without the special case for ASCII characters)

  constexpr size_t shift = unicode::CharInfoShift;

  // code >> shift
  masm.rshift32(Imm32(shift), code, temp);

  // index = index1[code >> shift];
  masm.movePtr(ImmPtr(unicode::index1), output);
  masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);

  // (code & ((1 << shift) - 1)
  masm.and32(Imm32((1 << shift) - 1), code, output);

  // (index << shift) + (code & ((1 << shift) - 1))
  masm.lshift32(Imm32(shift), temp);
  masm.add32(output, temp);

  // index = index2[(index << shift) + (code & ((1 << shift) - 1))]
  masm.movePtr(ImmPtr(unicode::index2), output);
  masm.load8ZeroExtend(BaseIndex(output, temp, TimesOne), temp);

  // Compute |index * 6| through |(index * 3) * TimesTwo|.
  static_assert(sizeof(unicode::CharacterInfo) == 6);
  masm.mulBy3(temp, temp);

  // upperCase = js_charinfo[index].upperCase
  masm.movePtr(ImmPtr(unicode::js_charinfo), output);
  masm.load16ZeroExtend(BaseIndex(output, temp, TimesTwo,
                                  offsetof(unicode::CharacterInfo, upperCase)),
                        temp);

  // uint16_t(ch) + upperCase
  masm.add32(code, temp);

  // Clear any high bits added when performing the unsigned 16-bit addition
  // through a signed 32-bit addition. An 8-bit zero-extend suffices here:
  // the special cases above were diverted to the OOL path, so the remaining
  // results stay within the Latin-1 (single byte) range.
  masm.move8ZeroExtend(temp, temp);

  // Load static string for upper case character.
  masm.lookupStaticString(temp, output, gen->runtime->staticStrings());

  masm.bind(ool->rejoin());
}
  15567 
  15568 void CodeGenerator::visitStringTrimStartIndex(LStringTrimStartIndex* lir) {
  15569  Register string = ToRegister(lir->string());
  15570  Register output = ToRegister(lir->output());
  15571 
  15572  auto volatileRegs = liveVolatileRegs(lir);
  15573  volatileRegs.takeUnchecked(output);
  15574 
  15575  masm.PushRegsInMask(volatileRegs);
  15576 
  15577  using Fn = int32_t (*)(const JSString*);
  15578  masm.setupAlignedABICall();
  15579  masm.passABIArg(string);
  15580  masm.callWithABI<Fn, jit::StringTrimStartIndex>();
  15581  masm.storeCallInt32Result(output);
  15582 
  15583  masm.PopRegsInMask(volatileRegs);
  15584 }
  15585 
  15586 void CodeGenerator::visitStringTrimEndIndex(LStringTrimEndIndex* lir) {
  15587  Register string = ToRegister(lir->string());
  15588  Register start = ToRegister(lir->start());
  15589  Register output = ToRegister(lir->output());
  15590 
  15591  auto volatileRegs = liveVolatileRegs(lir);
  15592  volatileRegs.takeUnchecked(output);
  15593 
  15594  masm.PushRegsInMask(volatileRegs);
  15595 
  15596  using Fn = int32_t (*)(const JSString*, int32_t);
  15597  masm.setupAlignedABICall();
  15598  masm.passABIArg(string);
  15599  masm.passABIArg(start);
  15600  masm.callWithABI<Fn, jit::StringTrimEndIndex>();
  15601  masm.storeCallInt32Result(output);
  15602 
  15603  masm.PopRegsInMask(volatileRegs);
  15604 }
  15605 
  15606 void CodeGenerator::visitStringSplit(LStringSplit* lir) {
  15607  pushArg(Imm32(INT32_MAX));
  15608  pushArg(ToRegister(lir->separator()));
  15609  pushArg(ToRegister(lir->string()));
  15610 
  15611  using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
  15612  callVM<Fn, js::StringSplitString>(lir);
  15613 }
  15614 
  15615 void CodeGenerator::visitInitializedLength(LInitializedLength* lir) {
  15616  Address initLength(ToRegister(lir->elements()),
  15617                     ObjectElements::offsetOfInitializedLength());
  15618  masm.load32(initLength, ToRegister(lir->output()));
  15619 }
  15620 
  15621 void CodeGenerator::visitSetInitializedLength(LSetInitializedLength* lir) {
  15622  Address initLength(ToRegister(lir->elements()),
  15623                     ObjectElements::offsetOfInitializedLength());
  15624  SetLengthFromIndex(masm, lir->index(), initLength);
  15625 }
  15626 
  15627 void CodeGenerator::visitNotI(LNotI* lir) {
  15628  Register input = ToRegister(lir->input());
  15629  Register output = ToRegister(lir->output());
  15630 
  15631  masm.cmp32Set(Assembler::Equal, input, Imm32(0), output);
  15632 }
  15633 
  15634 void CodeGenerator::visitNotIPtr(LNotIPtr* lir) {
  15635  Register input = ToRegister(lir->input());
  15636  Register output = ToRegister(lir->output());
  15637 
  15638  masm.cmpPtrSet(Assembler::Equal, input, ImmWord(0), output);
  15639 }
  15640 
  15641 void CodeGenerator::visitNotI64(LNotI64* lir) {
  15642  Register64 input = ToRegister64(lir->inputI64());
  15643  Register output = ToRegister(lir->output());
  15644 
  15645  masm.cmp64Set(Assembler::Equal, input, Imm64(0), output);
  15646 }
  15647 
  15648 void CodeGenerator::visitNotBI(LNotBI* lir) {
  15649  Register input = ToRegister(lir->input());
  15650  Register output = ToRegister(lir->output());
  15651 
  15652  masm.cmp32Set(Assembler::Equal, Address(input, BigInt::offsetOfLength()),
  15653                Imm32(0), output);
  15654 }
  15655 
  15656 void CodeGenerator::visitNotO(LNotO* lir) {
  15657  Register objreg = ToRegister(lir->input());
  15658  Register output = ToRegister(lir->output());
  15659 
  15660  bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
  15661  if (intact) {
  15662    // Bug 1874905: It would be fantastic if this could be optimized out.
  15663    assertObjectDoesNotEmulateUndefined(objreg, output, lir->mir());
  15664    masm.move32(Imm32(0), output);
  15665  } else {
  15666    auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  15667    addOutOfLineCode(ool, lir->mir());
  15668 
  15669    Label* ifEmulatesUndefined = ool->label1();
  15670    Label* ifDoesntEmulateUndefined = ool->label2();
  15671 
  15672    branchTestObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
  15673                                      ifDoesntEmulateUndefined, output, ool);
  15674    // fall through
  15675 
  15676    Label join;
  15677 
  15678    masm.move32(Imm32(0), output);
  15679    masm.jump(&join);
  15680 
  15681    masm.bind(ifEmulatesUndefined);
  15682    masm.move32(Imm32(1), output);
  15683 
  15684    masm.bind(&join);
  15685  }
  15686 }
  15687 
  15688 void CodeGenerator::visitNotV(LNotV* lir) {
  15689  auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
  15690  addOutOfLineCode(ool, lir->mir());
  15691 
  15692  Label* ifTruthy = ool->label1();
  15693  Label* ifFalsy = ool->label2();
  15694 
  15695  ValueOperand input = ToValue(lir->input());
  15696  Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
  15697  FloatRegister floatTemp = ToFloatRegister(lir->temp0());
  15698  Register output = ToRegister(lir->output());
  15699  const TypeDataList& observedTypes = lir->mir()->observedTypes();
  15700 
  15701  testValueTruthy(input, tempToUnbox, output, floatTemp, observedTypes,
  15702                  ifTruthy, ifFalsy, ool);
  15703 
  15704  Label join;
  15705 
  15706  // Note that the testValueTruthy call above may choose to fall through
  15707  // to ifTruthy instead of branching there.
  15708  masm.bind(ifTruthy);
  15709  masm.move32(Imm32(0), output);
  15710  masm.jump(&join);
  15711 
  15712  masm.bind(ifFalsy);
  15713  masm.move32(Imm32(1), output);
  15714 
  15715  // both branches meet here.
  15716  masm.bind(&join);
  15717 }
  15718 
  15719 void CodeGenerator::visitBoundsCheck(LBoundsCheck* lir) {
  15720  const LAllocation* index = lir->index();
  15721  const LAllocation* length = lir->length();
  15722  LSnapshot* snapshot = lir->snapshot();
  15723 
  15724  MIRType type = lir->mir()->type();
  15725 
  15726  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
  15727    if (type == MIRType::Int32) {
  15728      bailoutCmp32(cond, lhs, rhs, snapshot);
  15729    } else {
  15730      MOZ_ASSERT(type == MIRType::IntPtr);
  15731      bailoutCmpPtr(cond, lhs, rhs, snapshot);
  15732    }
  15733  };
  15734 
  15735  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
  15736                                int32_t rhs) {
  15737    if (type == MIRType::Int32) {
  15738      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
  15739    } else {
  15740      MOZ_ASSERT(type == MIRType::IntPtr);
  15741      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
  15742    }
  15743  };
  15744 
  15745  if (index->isConstant()) {
  15746    // Use uint32 so that the comparison is unsigned.
  15747    uint32_t idx = ToInt32(index);
  15748    if (length->isConstant()) {
  15749      uint32_t len = ToInt32(lir->length());
  15750      if (idx < len) {
  15751        return;
  15752      }
  15753      bailout(snapshot);
  15754      return;
  15755    }
  15756 
  15757    if (length->isGeneralReg()) {
  15758      bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), idx);
  15759    } else {
  15760      bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), idx);
  15761    }
  15762    return;
  15763  }
  15764 
  15765  Register indexReg = ToRegister(index);
  15766  if (length->isConstant()) {
  15767    bailoutCmpConstant(Assembler::AboveOrEqual, indexReg, ToInt32(length));
  15768  } else if (length->isGeneralReg()) {
  15769    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), indexReg);
  15770  } else {
  15771    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), indexReg);
  15772  }
  15773 }
  15774 
void CodeGenerator::visitBoundsCheckRange(LBoundsCheckRange* lir) {
  // Bail out unless every index in the range [index + min, index + max] is
  // a valid unsigned index below |length|. The additions are performed with
  // explicit overflow checks where they could wrap.
  int32_t min = lir->mir()->minimum();
  int32_t max = lir->mir()->maximum();
  MOZ_ASSERT(max >= min);

  LSnapshot* snapshot = lir->snapshot();
  MIRType type = lir->mir()->type();

  const LAllocation* length = lir->length();
  Register temp = ToRegister(lir->temp0());

  // Emit a 32-bit or pointer-width compare-and-bailout depending on the
  // operand width.
  auto bailoutCmp = [&](Assembler::Condition cond, auto lhs, auto rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, rhs, snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, rhs, snapshot);
    }
  };

  auto bailoutCmpConstant = [&](Assembler::Condition cond, auto lhs,
                                int32_t rhs) {
    if (type == MIRType::Int32) {
      bailoutCmp32(cond, lhs, Imm32(rhs), snapshot);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      bailoutCmpPtr(cond, lhs, ImmWord(rhs), snapshot);
    }
  };

  if (lir->index()->isConstant()) {
    // Constant index: when |index + min| and |index + max| are computable
    // without overflow and the lower bound is non-negative, a single
    // unsigned comparison of |index + max| against the length suffices.
    int32_t nmin, nmax;
    int32_t index = ToInt32(lir->index());
    if (mozilla::SafeAdd(index, min, &nmin) &&
        mozilla::SafeAdd(index, max, &nmax) && nmin >= 0) {
      if (length->isGeneralReg()) {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToRegister(length), nmax);
      } else {
        bailoutCmpConstant(Assembler::BelowOrEqual, ToAddress(length), nmax);
      }
      return;
    }
    // Otherwise fall through to the dynamic path with the constant in |temp|.
    masm.mov(ImmWord(index), temp);
  } else {
    masm.mov(ToRegister(lir->index()), temp);
  }

  // If the minimum and maximum differ then do an underflow check first.
  // If the two are the same then doing an unsigned comparison on the
  // length will also catch a negative index.
  if (min != max) {
    if (min != 0) {
      // temp += min, bailing out on signed overflow.
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(min), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(min), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    }

    // Bail if the lower bound |index + min| is negative.
    bailoutCmpConstant(Assembler::LessThan, temp, 0);

    if (min != 0) {
      // Undo the |+ min| so the |+ max| below starts from |index| again.
      // Prefer folding the subtraction into |max| when |max - min| doesn't
      // overflow; otherwise subtract from the register.
      int32_t diff;
      if (mozilla::SafeSub(max, min, &diff)) {
        max = diff;
      } else {
        if (type == MIRType::Int32) {
          masm.sub32(Imm32(min), temp);
        } else {
          masm.subPtr(Imm32(min), temp);
        }
      }
    }
  }

  // Compute the maximum possible index. No overflow check is needed when
  // max > 0. We can only wraparound to a negative number, which will test as
  // larger than all nonnegative numbers in the unsigned comparison, and the
  // length is required to be nonnegative (else testing a negative length
  // would succeed on any nonnegative index).
  if (max != 0) {
    if (max < 0) {
      // Adding a negative |max| can underflow; check explicitly.
      Label bail;
      if (type == MIRType::Int32) {
        masm.branchAdd32(Assembler::Overflow, Imm32(max), temp, &bail);
      } else {
        masm.branchAddPtr(Assembler::Overflow, Imm32(max), temp, &bail);
      }
      bailoutFrom(&bail, snapshot);
    } else {
      if (type == MIRType::Int32) {
        masm.add32(Imm32(max), temp);
      } else {
        masm.addPtr(Imm32(max), temp);
      }
    }
  }

  // Finally bail unless |index + max| < length (unsigned).
  if (length->isGeneralReg()) {
    bailoutCmp(Assembler::BelowOrEqual, ToRegister(length), temp);
  } else {
    bailoutCmp(Assembler::BelowOrEqual, ToAddress(length), temp);
  }
}
  15881 
  15882 void CodeGenerator::visitBoundsCheckLower(LBoundsCheckLower* lir) {
  15883  int32_t min = lir->mir()->minimum();
  15884  bailoutCmp32(Assembler::LessThan, ToRegister(lir->index()), Imm32(min),
  15885               lir->snapshot());
  15886 }
  15887 
  15888 void CodeGenerator::visitSpectreMaskIndex(LSpectreMaskIndex* lir) {
  15889  MOZ_ASSERT(JitOptions.spectreIndexMasking);
  15890 
  15891  const LAllocation* length = lir->length();
  15892  Register index = ToRegister(lir->index());
  15893  Register output = ToRegister(lir->output());
  15894 
  15895  if (lir->mir()->type() == MIRType::Int32) {
  15896    if (length->isGeneralReg()) {
  15897      masm.spectreMaskIndex32(index, ToRegister(length), output);
  15898    } else {
  15899      masm.spectreMaskIndex32(index, ToAddress(length), output);
  15900    }
  15901  } else {
  15902    MOZ_ASSERT(lir->mir()->type() == MIRType::IntPtr);
  15903    if (length->isGeneralReg()) {
  15904      masm.spectreMaskIndexPtr(index, ToRegister(length), output);
  15905    } else {
  15906      masm.spectreMaskIndexPtr(index, ToAddress(length), output);
  15907    }
  15908  }
  15909 }
  15910 
  15911 CodeGenerator::AddressOrBaseObjectElementIndex
  15912 CodeGenerator::ToAddressOrBaseObjectElementIndex(Register elements,
  15913                                                 const LAllocation* index) {
  15914  if (index->isConstant()) {
  15915    NativeObject::elementsSizeMustNotOverflow();
  15916    return AddressOrBaseObjectElementIndex(
  15917        Address(elements, ToInt32(index) * sizeof(JS::Value)));
  15918  }
  15919  return AddressOrBaseObjectElementIndex(
  15920      BaseObjectElementIndex(elements, ToRegister(index)));
  15921 }
  15922 
  15923 void CodeGenerator::emitStoreHoleCheck(Address dest, LSnapshot* snapshot) {
  15924  Label bail;
  15925  masm.branchTestMagic(Assembler::Equal, dest, &bail);
  15926  bailoutFrom(&bail, snapshot);
  15927 }
  15928 
  15929 void CodeGenerator::emitStoreHoleCheck(BaseObjectElementIndex dest,
  15930                                       LSnapshot* snapshot) {
  15931  Label bail;
  15932  masm.branchTestMagic(Assembler::Equal, dest, &bail);
  15933  bailoutFrom(&bail, snapshot);
  15934 }
  15935 
  15936 void CodeGenerator::visitStoreElementT(LStoreElementT* store) {
  15937  Register elements = ToRegister(store->elements());
  15938  const LAllocation* index = store->index();
  15939 
  15940  MIRType valueType = store->mir()->value()->type();
  15941  MOZ_ASSERT(valueType != MIRType::MagicHole);
  15942 
  15943  ConstantOrRegister value = ToConstantOrRegister(store->value(), valueType);
  15944 
  15945  auto dest = ToAddressOrBaseObjectElementIndex(elements, index);
  15946 
  15947  dest.match([&](const auto& dest) {
  15948    if (store->mir()->needsBarrier()) {
  15949      emitPreBarrier(dest);
  15950    }
  15951 
  15952    if (store->mir()->needsHoleCheck()) {
  15953      emitStoreHoleCheck(dest, store->snapshot());
  15954    }
  15955 
  15956    masm.storeUnboxedValue(value, valueType, dest);
  15957  });
  15958 }
  15959 
  15960 void CodeGenerator::visitStoreElementV(LStoreElementV* lir) {
  15961  ValueOperand value = ToValue(lir->value());
  15962  Register elements = ToRegister(lir->elements());
  15963  const LAllocation* index = lir->index();
  15964 
  15965  auto dest = ToAddressOrBaseObjectElementIndex(elements, index);
  15966 
  15967  dest.match([&](const auto& dest) {
  15968    if (lir->mir()->needsBarrier()) {
  15969      emitPreBarrier(dest);
  15970    }
  15971 
  15972    if (lir->mir()->needsHoleCheck()) {
  15973      emitStoreHoleCheck(dest, lir->snapshot());
  15974    }
  15975 
  15976    masm.storeValue(value, dest);
  15977  });
  15978 }
  15979 
  15980 void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
  15981  Register elements = ToRegister(lir->elements());
  15982  Register index = ToRegister(lir->index());
  15983 
  15984  Address elementsFlags(elements, ObjectElements::offsetOfFlags());
  15985  masm.or32(Imm32(ObjectElements::NON_PACKED), elementsFlags);
  15986 
  15987  BaseObjectElementIndex element(elements, index);
  15988  masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
  15989 }
  15990 
void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
  // Store an unboxed value into a dense element. The inline path handles
  // indices below the initialized length; the OOL path handles the
  // out-of-bounds store (presumably growing the initialized length — see
  // MacroAssembler::prepareOOBStoreElement), bailing out when that fails.
  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    Label bail;
    masm.prepareOOBStoreElement(obj, index, elements, temp, &bail,
                                liveVolatileRegs(lir));
    bailoutFrom(&bail, lir->snapshot());

    // Jump to the inline path where we will store the value.
    // We rejoin after the prebarrier, because the memory is uninitialized.
    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  // Take the OOL path when |index| is at or past the initialized length.
  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  // In-bounds store: pre-barrier the old value before overwriting it.
  emitPreBarrier(BaseObjectElementIndex(elements, index));

  masm.bind(ool->rejoin());

  MIRType valueType = lir->mir()->value()->type();
  MOZ_ASSERT(valueType != MIRType::MagicHole);

  ConstantOrRegister val = ToConstantOrRegister(lir->value(), valueType);
  masm.storeUnboxedValue(val, valueType,
                         BaseObjectElementIndex(elements, index));

  // Post-barrier when the stored value may be a nursery-allocated GC thing.
  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    emitElementPostWriteBarrier(lir->mir(), regs, obj, index, temp, val);
  }
}
  16028 
void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
  // Store a boxed Value into a dense element. The inline path handles
  // indices below the initialized length; the OOL path handles the
  // out-of-bounds store (presumably growing the initialized length — see
  // MacroAssembler::prepareOOBStoreElement), bailing out when that fails.
  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  ValueOperand value = ToValue(lir->value());
  Register temp = ToRegister(lir->temp0());

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    Label bail;
    masm.prepareOOBStoreElement(obj, index, elements, temp, &bail,
                                liveVolatileRegs(lir));
    bailoutFrom(&bail, lir->snapshot());

    // Jump to the inline path where we will store the value.
    // We rejoin after the prebarrier, because the memory is uninitialized.
    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  // Take the OOL path when |index| is at or past the initialized length.
  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

  // In-bounds store: pre-barrier the old value before overwriting it.
  emitPreBarrier(BaseObjectElementIndex(elements, index));

  masm.bind(ool->rejoin());
  masm.storeValue(value, BaseObjectElementIndex(elements, index));

  // Post-barrier when the stored value may be a nursery-allocated GC thing.
  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    emitElementPostWriteBarrier(lir->mir(), regs, obj, index, temp,
                                ConstantOrRegister(value));
  }
}
  16062 
  16063 void CodeGenerator::visitArrayPopShift(LArrayPopShift* lir) {
  16064  Register obj = ToRegister(lir->object());
  16065  Register temp1 = ToRegister(lir->temp0());
  16066  Register temp2 = ToRegister(lir->temp1());
  16067  ValueOperand out = ToOutValue(lir);
  16068 
  16069  Label bail;
  16070  if (lir->mir()->mode() == MArrayPopShift::Pop) {
  16071    masm.packedArrayPop(obj, out, temp1, temp2, &bail);
  16072  } else {
  16073    MOZ_ASSERT(lir->mir()->mode() == MArrayPopShift::Shift);
  16074    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
  16075    masm.packedArrayShift(obj, out, temp1, temp2, volatileRegs, &bail);
  16076  }
  16077  bailoutFrom(&bail, lir->snapshot());
  16078 }
  16079 
void CodeGenerator::visitArrayPush(LArrayPush* lir) {
  // Append a value to a dense array, returning the new length. The inline
  // path requires length == initializedLength and spare capacity; the OOL
  // path grows the elements via NativeObject::addDenseElementPure and bails
  // out when that fails.
  Register obj = ToRegister(lir->object());
  Register elementsTemp = ToRegister(lir->temp0());
  Register length = ToRegister(lir->output());
  ValueOperand value = ToValue(lir->value());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    Register temp = ToRegister(lir->temp0());

    // Preserve everything live across the ABI call; |temp| receives the
    // result, and the output/value registers must survive the call.
    LiveRegisterSet liveRegs = liveVolatileRegs(lir);
    liveRegs.takeUnchecked(temp);
    liveRegs.addUnchecked(ToRegister(lir->output()));
    liveRegs.addUnchecked(ToValue(lir->value()));

    masm.PushRegsInMask(liveRegs);

    masm.setupAlignedABICall();
    masm.loadJSContext(temp);
    masm.passABIArg(temp);
    masm.passABIArg(obj);

    using Fn = bool (*)(JSContext*, NativeObject* obj);
    masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
    masm.storeCallPointerResult(temp);

    masm.PopRegsInMask(liveRegs);
    // A false return means the element storage couldn't be grown.
    bailoutIfFalseBool(temp, lir->snapshot());

    // Load the reallocated elements pointer.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  // Load obj->elements in elementsTemp.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), elementsTemp);

  Address initLengthAddr(elementsTemp,
                         ObjectElements::offsetOfInitializedLength());
  Address lengthAddr(elementsTemp, ObjectElements::offsetOfLength());
  Address capacityAddr(elementsTemp, ObjectElements::offsetOfCapacity());

  // Bail out if length != initLength.
  masm.load32(lengthAddr, length);
  bailoutCmp32(Assembler::NotEqual, initLengthAddr, length, lir->snapshot());

  // If length < capacity, we can add a dense element inline. If not, we
  // need to allocate more elements.
  masm.spectreBoundsCheck32(length, capacityAddr, spectreTemp, ool->entry());
  masm.bind(ool->rejoin());

  // Store the value.
  masm.storeValue(value, BaseObjectElementIndex(elementsTemp, length));

  // Update length and initialized length.
  masm.add32(Imm32(1), length);
  masm.store32(length, Address(elementsTemp, ObjectElements::offsetOfLength()));
  masm.store32(length, Address(elementsTemp,
                               ObjectElements::offsetOfInitializedLength()));

  // Post-barrier when the stored value may be a nursery-allocated GC thing.
  // |length| was already incremented, so the stored element's index is
  // |length - 1| (hence indexDiff = -1).
  if (ValueNeedsPostBarrier(lir->mir()->value())) {
    LiveRegisterSet regs = liveVolatileRegs(lir);
    regs.addUnchecked(length);
    emitElementPostWriteBarrier(lir->mir(), regs, obj, length, elementsTemp,
                                ConstantOrRegister(value),
                                /* indexDiff = */ -1);
  }
}
  16150 
  16151 void CodeGenerator::visitArraySlice(LArraySlice* lir) {
  16152  Register object = ToRegister(lir->object());
  16153  Register begin = ToRegister(lir->begin());
  16154  Register end = ToRegister(lir->end());
  16155  Register temp0 = ToRegister(lir->temp0());
  16156  Register temp1 = ToRegister(lir->temp1());
  16157 
  16158  Label call, fail;
  16159 
  16160  Label bail;
  16161  masm.branchArrayIsNotPacked(object, temp0, temp1, &bail);
  16162  bailoutFrom(&bail, lir->snapshot());
  16163 
  16164  // Try to allocate an object.
  16165  TemplateObject templateObject(lir->mir()->templateObj());
  16166  masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
  16167                      &fail);
  16168 
  16169  masm.jump(&call);
  16170  {
  16171    masm.bind(&fail);
  16172    masm.movePtr(ImmPtr(nullptr), temp0);
  16173  }
  16174  masm.bind(&call);
  16175 
  16176  pushArg(temp0);
  16177  pushArg(end);
  16178  pushArg(begin);
  16179  pushArg(object);
  16180 
  16181  using Fn =
  16182      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  16183  callVM<Fn, ArraySliceDense>(lir);
  16184 }
  16185 
void CodeGenerator::visitArgumentsSlice(LArgumentsSlice* lir) {
  // Slice of an arguments object: same shape as visitArraySlice, but no
  // packed-array check is needed and a different VM function is called.
  Register object = ToRegister(lir->object());
  Register begin = ToRegister(lir->begin());
  Register end = ToRegister(lir->end());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label call, fail;

  // Try to allocate an object.
  TemplateObject templateObject(lir->mir()->templateObj());
  masm.createGCObject(temp0, temp1, templateObject, lir->mir()->initialHeap(),
                      &fail);

  masm.jump(&call);
  {
    // Allocation failed: pass nullptr so the VM function allocates the
    // result object itself.
    masm.bind(&fail);
    masm.movePtr(ImmPtr(nullptr), temp0);
  }
  masm.bind(&call);

  // Arguments are pushed in reverse order of the VM function's signature.
  pushArg(temp0);
  pushArg(end);
  pushArg(begin);
  pushArg(object);

  using Fn =
      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
  callVM<Fn, ArgumentsSliceDense>(lir);
}
  16216 
#ifdef DEBUG
// Emit debug-build assembler assertions that the (begin, count) pair of an
// arguments-slice is within the bounds of the frame's actual arguments:
//   0 <= begin, 0 <= count, begin <= numActualArgs, count <= numActualArgs,
//   and begin + count <= numActualArgs.
// Each operand may be a register or a compile-time constant; constants are
// checked with MOZ_ASSERT at compile time instead of emitted code.
// |numActualArgs| is clobbered; |begin| and |count| are preserved.
// NOTE: the assumeUnreachable message strings state the invariant being
// asserted, not the violation that was detected.
void CodeGenerator::emitAssertArgumentsSliceBounds(const RegisterOrInt32& begin,
                                                  const RegisterOrInt32& count,
                                                  Register numActualArgs) {
  // |begin| must be positive or zero.
  if (begin.is<Register>()) {
    Label beginOk;
    masm.branch32(Assembler::GreaterThanOrEqual, begin.as<Register>(), Imm32(0),
                  &beginOk);
    masm.assumeUnreachable("begin < 0");
    masm.bind(&beginOk);
  } else {
    MOZ_ASSERT(begin.as<int32_t>() >= 0);
  }

  // |count| must be positive or zero.
  if (count.is<Register>()) {
    Label countOk;
    masm.branch32(Assembler::GreaterThanOrEqual, count.as<Register>(), Imm32(0),
                  &countOk);
    masm.assumeUnreachable("count < 0");
    masm.bind(&countOk);
  } else {
    MOZ_ASSERT(count.as<int32_t>() >= 0);
  }

  // |begin| must be less-or-equal to |numActualArgs|.
  Label argsBeginOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginOk);
  }
  masm.assumeUnreachable("begin <= numActualArgs");
  masm.bind(&argsBeginOk);

  // |count| must be less-or-equal to |numActualArgs|.
  Label argsCountOk;
  if (count.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, count.as<Register>(),
                   &argsCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(count.as<int32_t>()), &argsCountOk);
  }
  masm.assumeUnreachable("count <= numActualArgs");
  masm.bind(&argsCountOk);

  // |begin| and |count| must be preserved, but |numActualArgs| can be changed.
  //
  // Pre-condition: |count| <= |numActualArgs|
  // Condition to test: |begin + count| <= |numActualArgs|
  // Transform to: |begin| <= |numActualArgs - count|
  if (count.is<Register>()) {
    masm.subPtr(count.as<Register>(), numActualArgs);
  } else {
    masm.subPtr(Imm32(count.as<int32_t>()), numActualArgs);
  }

  // |begin + count| must be less-or-equal to |numActualArgs|.
  Label argsBeginCountOk;
  if (begin.is<Register>()) {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs, begin.as<Register>(),
                   &argsBeginCountOk);
  } else {
    masm.branchPtr(Assembler::AboveOrEqual, numActualArgs,
                   Imm32(begin.as<int32_t>()), &argsBeginCountOk);
  }
  masm.assumeUnreachable("begin + count <= numActualArgs");
  masm.bind(&argsBeginCountOk);
}
#endif
  16291 
// Allocate the result array for an arguments-slice operation. |count| (a
// register or compile-time constant) becomes both the array length and the
// initialized length. Falls back to an out-of-line VM call when the template
// object's inline capacity is too small or inline allocation fails. On
// rejoin, |output| holds the array; elements beyond what the caller fills
// are left to the caller / VM function.
template <class ArgumentsSlice>
void CodeGenerator::emitNewArray(ArgumentsSlice* lir,
                                const RegisterOrInt32& count, Register output,
                                Register temp) {
  // Out-of-line path: allocate in the VM and pre-reserve |count| dense
  // elements.
  using Fn = ArrayObject* (*)(JSContext*, int32_t);
  auto* ool = count.match(
      [&](Register count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(count), StoreRegisterTo(output));
      },
      [&](int32_t count) {
        return oolCallVM<Fn, NewArrayObjectEnsureDenseInitLength>(
            lir, ArgList(Imm32(count)), StoreRegisterTo(output));
      });

  TemplateObject templateObject(lir->mir()->templateObj());
  MOZ_ASSERT(templateObject.isArrayObject());

  // The template must describe an empty array with inline (fixed) elements;
  // the inline path below relies on that when it writes the lengths.
  auto templateNativeObj = templateObject.asTemplateNativeObject();
  MOZ_ASSERT(templateNativeObj.getArrayLength() == 0);
  MOZ_ASSERT(templateNativeObj.getDenseInitializedLength() == 0);
  MOZ_ASSERT(!templateNativeObj.hasDynamicElements());

  // Check array capacity. Call into the VM if the template object's capacity
  // is too small.
  bool tryAllocate = count.match(
      [&](Register count) {
        masm.branch32(Assembler::Above, count,
                      Imm32(templateNativeObj.getDenseCapacity()),
                      ool->entry());
        return true;
      },
      [&](int32_t count) {
        // A constant count that exceeds capacity always takes the VM path,
        // so no inline allocation code needs to be emitted at all.
        MOZ_ASSERT(count >= 0);
        if (uint32_t(count) > templateNativeObj.getDenseCapacity()) {
          masm.jump(ool->entry());
          return false;
        }
        return true;
      });

  if (tryAllocate) {
    // Try to allocate an object.
    masm.createGCObject(output, temp, templateObject, lir->mir()->initialHeap(),
                        ool->entry());

    auto setInitializedLengthAndLength = [&](auto count) {
      const int elementsOffset = NativeObject::offsetOfFixedElements();

      // Update initialized length.
      Address initLength(
          output, elementsOffset + ObjectElements::offsetOfInitializedLength());
      masm.store32(count, initLength);

      // Update length.
      Address length(output, elementsOffset + ObjectElements::offsetOfLength());
      masm.store32(count, length);
    };

    // The array object was successfully created. Set the length and initialized
    // length and then proceed to fill the elements.
    // (A constant count of zero matches the template object's lengths, so no
    // stores are needed in that case.)
    count.match([&](Register count) { setInitializedLengthAndLength(count); },
                [&](int32_t count) {
                  if (count > 0) {
                    setInitializedLengthAndLength(Imm32(count));
                  }
                });
  }

  masm.bind(ool->rejoin());
}
  16363 
void CodeGenerator::visitFrameArgumentsSlice(LFrameArgumentsSlice* lir) {
  // Create an array containing |count| actual arguments from the JS frame,
  // starting at argument index |begin|. Allocation goes through emitNewArray;
  // the copy loop below reuses |output| and |begin| as scratch (saved and
  // restored around the loop), so only |count| and |temp| keep their
  // original roles throughout.
  Register begin = ToRegister(lir->begin());
  Register count = ToRegister(lir->count());
  Register temp = ToRegister(lir->temp0());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  masm.loadNumActualArgs(FramePointer, temp);
  emitAssertArgumentsSliceBounds(RegisterOrInt32(begin), RegisterOrInt32(count),
                                 temp);
#endif

  emitNewArray(lir, RegisterOrInt32(count), output, temp);

  Label done;
  masm.branch32(Assembler::Equal, count, Imm32(0), &done);
  {
    // Pick a free value register for loading each argument, excluding the
    // registers still in use.
    AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    allRegs.take(begin);
    allRegs.take(count);
    allRegs.take(temp);
    allRegs.take(output);

    ValueOperand value = allRegs.takeAnyValue();

    // Save the registers that the loop clobbers but whose original values
    // are still needed afterwards (|output|, |begin|) plus the chosen
    // value register.
    LiveRegisterSet liveRegs;
    liveRegs.add(output);
    liveRegs.add(begin);
    liveRegs.add(value);

    masm.PushRegsInMask(liveRegs);

    // Initialize all elements.

    // |output| is repurposed as the elements pointer for the duration of
    // the loop (restored by PopRegsInMask below).
    Register elements = output;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    // |begin| is repurposed as the running frame-argument index.
    Register argIndex = begin;

    Register index = temp;
    masm.move32(Imm32(0), index);

    size_t argvOffset = JitFrameLayout::offsetOfActualArgs();
    BaseValueIndex argPtr(FramePointer, argIndex, argvOffset);

    // Copy loop: elements[index] = actuals[argIndex] for index in [0, count).
    Label loop;
    masm.bind(&loop);

    masm.loadValue(argPtr, value);

    // We don't need a pre-barrier, because the element at |index| is guaranteed
    // to be a non-GC thing (either uninitialized memory or the magic hole
    // value).
    masm.storeValue(value, BaseObjectElementIndex(elements, index));

    masm.add32(Imm32(1), index);
    masm.add32(Imm32(1), argIndex);

    masm.branch32(Assembler::LessThan, index, count, &loop);

    masm.PopRegsInMask(liveRegs);

    // Emit a post-write barrier if |output| is tenured.
    //
    // We expect that |output| is nursery allocated, so it isn't worth the
    // trouble to check if no frame argument is a nursery thing, which would
    // allow to omit the post-write barrier.
    masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);
    }

    masm.PushRegsInMask(volatileRegs);
    emitPostWriteBarrier(output);
    masm.PopRegsInMask(volatileRegs);
  }
  masm.bind(&done);
}
  16445 
  16446 CodeGenerator::RegisterOrInt32 CodeGenerator::ToRegisterOrInt32(
  16447    const LAllocation* allocation) {
  16448  if (allocation->isConstant()) {
  16449    return RegisterOrInt32(allocation->toConstant()->toInt32());
  16450  }
  16451  return RegisterOrInt32(ToRegister(allocation));
  16452 }
  16453 
void CodeGenerator::visitInlineArgumentsSlice(LInlineArgumentsSlice* lir) {
  // Create an array with |count| of the inlined call's arguments, starting at
  // |begin|. The arguments are LIR operands (constants or registers), not
  // frame slots, so three copy strategies are used below depending on how
  // much is known at compile time.
  RegisterOrInt32 begin = ToRegisterOrInt32(lir->begin());
  RegisterOrInt32 count = ToRegisterOrInt32(lir->count());
  Register temp = ToRegister(lir->temp());
  Register output = ToRegister(lir->output());

  uint32_t numActuals = lir->mir()->numActuals();

#ifdef DEBUG
  masm.move32(Imm32(numActuals), temp);

  emitAssertArgumentsSliceBounds(begin, count, temp);
#endif

  emitNewArray(lir, count, output, temp);

  // We're done if there are no actual arguments.
  if (numActuals == 0) {
    return;
  }

  // Check if any arguments have to be copied.
  Label done;
  if (count.is<Register>()) {
    masm.branch32(Assembler::Equal, count.as<Register>(), Imm32(0), &done);
  } else if (count.as<int32_t>() == 0) {
    return;
  }

  // Fetch the i-th inlined argument as a constant or typed register.
  auto getArg = [&](uint32_t i) {
    return toConstantOrRegister(lir, LInlineArgumentsSlice::ArgIndex(i),
                                lir->mir()->getArg(i)->type());
  };

  auto storeArg = [&](uint32_t i, auto dest) {
    // We don't need a pre-barrier because the element at |index| is guaranteed
    // to be a non-GC thing (either uninitialized memory or the magic hole
    // value).
    masm.storeConstantOrRegister(getArg(i), dest);
  };

  // Initialize all elements.
  if (numActuals == 1) {
    // There's exactly one argument. We've checked that |count| is non-zero,
    // which implies that |begin| must be zero.
    MOZ_ASSERT_IF(begin.is<int32_t>(), begin.as<int32_t>() == 0);

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    storeArg(0, Address(elements, 0));
  } else if (begin.is<Register>()) {
    // There is more than one argument and |begin| isn't a compile-time
    // constant. Iterate through 0..numActuals to search for |begin| and then
    // start copying |count| arguments from that index.

    // |output| and |begin| are clobbered below (as elements pointer and
    // running argument index), so save and restore them around the copy.
    LiveGeneralRegisterSet liveRegs;
    liveRegs.add(output);
    liveRegs.add(begin.as<Register>());

    masm.PushRegsInMask(liveRegs);

    Register elements = output;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    Register argIndex = begin.as<Register>();

    Register index = temp;
    masm.move32(Imm32(0), index);

    // Fully unrolled: for each possible argument position |i|, copy it only
    // once the running |argIndex| has reached |i|; stop after |count| copies.
    Label doneLoop;
    for (uint32_t i = 0; i < numActuals; ++i) {
      Label next;
      masm.branch32(Assembler::NotEqual, argIndex, Imm32(i), &next);

      storeArg(i, BaseObjectElementIndex(elements, index));

      masm.add32(Imm32(1), index);
      masm.add32(Imm32(1), argIndex);

      if (count.is<Register>()) {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      count.as<Register>(), &doneLoop);
      } else {
        masm.branch32(Assembler::GreaterThanOrEqual, index,
                      Imm32(count.as<int32_t>()), &doneLoop);
      }

      masm.bind(&next);
    }
    masm.bind(&doneLoop);

    masm.PopRegsInMask(liveRegs);
  } else {
    // There is more than one argument and |begin| is a compile-time constant.

    Register elements = temp;
    masm.loadPtr(Address(output, NativeObject::offsetOfElements()), elements);

    int32_t argIndex = begin.as<int32_t>();

    int32_t index = 0;

    // Copy arguments [argIndex, numActuals) to elements [0, count),
    // stopping early once |count| elements have been written.
    Label doneLoop;
    for (uint32_t i = argIndex; i < numActuals; ++i) {
      storeArg(i, Address(elements, index * sizeof(Value)));

      index += 1;

      if (count.is<Register>()) {
        masm.branch32(Assembler::LessThanOrEqual, count.as<Register>(),
                      Imm32(index), &doneLoop);
      } else {
        if (index >= count.as<int32_t>()) {
          break;
        }
      }
    }
    masm.bind(&doneLoop);
  }

  // Determine if we have to emit post-write barrier.
  //
  // If either |begin| or |count| is a constant, use their value directly.
  // Otherwise assume we copy all inline arguments from 0..numActuals.
  bool postWriteBarrier = false;
  uint32_t actualBegin = begin.match([](Register) { return 0; },
                                     [](int32_t value) { return value; });
  uint32_t actualCount =
      count.match([=](Register) { return numActuals; },
                  [](int32_t value) -> uint32_t { return value; });
  for (uint32_t i = 0; i < actualCount; ++i) {
    ConstantOrRegister arg = getArg(actualBegin + i);
    if (arg.constant()) {
      Value v = arg.value();
      if (v.isGCThing() && IsInsideNursery(v.toGCThing())) {
        postWriteBarrier = true;
      }
    } else {
      MIRType type = arg.reg().type();
      if (type == MIRType::Value || NeedsPostBarrier(type)) {
        postWriteBarrier = true;
      }
    }
  }

  // Emit a post-write barrier if |output| is tenured and we couldn't
  // determine at compile-time that no barrier is needed.
  if (postWriteBarrier) {
    masm.branchPtrInNurseryChunk(Assembler::Equal, output, temp, &done);

    LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
    volatileRegs.takeUnchecked(temp);
    if (output.volatile_()) {
      volatileRegs.addUnchecked(output);
    }

    masm.PushRegsInMask(volatileRegs);
    emitPostWriteBarrier(output);
    masm.PopRegsInMask(volatileRegs);
  }

  masm.bind(&done);
}
  16618 
void CodeGenerator::visitNormalizeSliceTerm(LNormalizeSliceTerm* lir) {
  // Normalize a slice index against |length|:
  //   value <  0: output = max(value + length, 0)
  //   value >= 0: output = min(value, length)
  Register value = ToRegister(lir->value());
  Register length = ToRegister(lir->length());
  Register output = ToRegister(lir->output());

  masm.move32(value, output);

  Label positive;
  masm.branch32(Assembler::GreaterThanOrEqual, value, Imm32(0), &positive);

  // Negative case: add the length and clamp to zero from below.
  Label done;
  masm.add32(length, output);
  masm.branch32(Assembler::GreaterThanOrEqual, output, Imm32(0), &done);
  masm.move32(Imm32(0), output);
  masm.jump(&done);

  // Non-negative case: clamp to |length| from above.
  masm.bind(&positive);
  masm.cmp32Move32(Assembler::LessThan, length, value, length, output);

  masm.bind(&done);
}
  16640 
void CodeGenerator::visitArrayJoin(LArrayJoin* lir) {
  // Array.prototype.join: handle the trivial length 0/1 cases inline and
  // call into the VM for everything else.
  Label skipCall;

  Register output = ToRegister(lir->output());
  Register sep = ToRegister(lir->separator());
  Register array = ToRegister(lir->array());
  Register temp = ToRegister(lir->temp0());

  // Fast path for simple length <= 1 cases.
  {
    masm.loadPtr(Address(array, NativeObject::offsetOfElements()), temp);
    Address length(temp, ObjectElements::offsetOfLength());
    Address initLength(temp, ObjectElements::offsetOfInitializedLength());

    // Check for length == 0
    Label notEmpty;
    masm.branch32(Assembler::NotEqual, length, Imm32(0), &notEmpty);
    // An empty array joins to the empty string regardless of separator.
    const JSAtomState& names = gen->runtime->names();
    masm.movePtr(ImmGCPtr(names.empty_), output);
    masm.jump(&skipCall);

    masm.bind(&notEmpty);
    Label notSingleString;
    // Check for length == 1, initializedLength >= 1, arr[0].isString()
    masm.branch32(Assembler::NotEqual, length, Imm32(1), &notSingleString);
    masm.branch32(Assembler::LessThan, initLength, Imm32(1), &notSingleString);

    // A one-element array whose element is already a string joins to that
    // string itself; no separator is involved.
    Address elem0(temp, 0);
    masm.branchTestString(Assembler::NotEqual, elem0, &notSingleString);

    // At this point, 'output' can be used as a scratch register, since we're
    // guaranteed to succeed.
    masm.unboxString(elem0, output);
    masm.jump(&skipCall);
    masm.bind(&notSingleString);
  }

  pushArg(sep);
  pushArg(array);

  using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
  callVM<Fn, jit::ArrayJoin>(lir);
  masm.bind(&skipCall);
}
  16685 
  16686 void CodeGenerator::visitObjectKeys(LObjectKeys* lir) {
  16687  Register object = ToRegister(lir->object());
  16688 
  16689  pushArg(object);
  16690 
  16691  using Fn = JSObject* (*)(JSContext*, HandleObject);
  16692  callVM<Fn, jit::ObjectKeys>(lir);
  16693 }
  16694 
  16695 void CodeGenerator::visitObjectKeysLength(LObjectKeysLength* lir) {
  16696  Register object = ToRegister(lir->object());
  16697 
  16698  pushArg(object);
  16699 
  16700  using Fn = bool (*)(JSContext*, HandleObject, int32_t*);
  16701  callVM<Fn, jit::ObjectKeysLength>(lir);
  16702 }
  16703 
void CodeGenerator::visitGetIteratorCache(LGetIteratorCache* lir) {
  // Attach an inline cache for obtaining an iterator from a value (the
  // value may be a constant or live in registers).
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  TypedOrValueRegister val =
      toConstantOrRegister(lir, LGetIteratorCache::ValueIndex,
                           lir->mir()->value()->type())
          .reg();
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  IonGetIteratorIC ic(liveRegs, val, output, temp0, temp1);
  addIC(lir, allocateIC(ic));
}
  16717 
  16718 void CodeGenerator::visitOptimizeSpreadCallCache(
  16719    LOptimizeSpreadCallCache* lir) {
  16720  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  16721  ValueOperand val = ToValue(lir->value());
  16722  ValueOperand output = ToOutValue(lir);
  16723  Register temp = ToRegister(lir->temp0());
  16724 
  16725  IonOptimizeSpreadCallIC ic(liveRegs, val, output, temp);
  16726  addIC(lir, allocateIC(ic));
  16727 }
  16728 
void CodeGenerator::visitCloseIterCache(LCloseIterCache* lir) {
  // Attach an inline cache for closing an iterator with the given
  // completion kind (normal or thrown).
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  Register iter = ToRegister(lir->iter());
  Register temp = ToRegister(lir->temp0());
  CompletionKind kind = CompletionKind(lir->mir()->completionKind());

  IonCloseIterIC ic(liveRegs, iter, temp, kind);
  addIC(lir, allocateIC(ic));
}
  16738 
void CodeGenerator::visitOptimizeGetIteratorCache(
    LOptimizeGetIteratorCache* lir) {
  // Attach an inline cache that tries to skip the full GetIterator protocol
  // for the given value; the boolean result lands in |output|.
  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  ValueOperand val = ToValue(lir->value());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  IonOptimizeGetIteratorIC ic(liveRegs, val, output, temp);
  addIC(lir, allocateIC(ic));
}
  16749 
  16750 void CodeGenerator::visitIteratorMore(LIteratorMore* lir) {
  16751  const Register obj = ToRegister(lir->iterator());
  16752  const ValueOperand output = ToOutValue(lir);
  16753  const Register temp = ToRegister(lir->temp0());
  16754 
  16755  masm.iteratorMore(obj, output, temp);
  16756 }
  16757 
  16758 void CodeGenerator::visitIteratorLength(LIteratorLength* lir) {
  16759  Register obj = ToRegister(lir->iter());
  16760  Register output = ToRegister(lir->output());
  16761  masm.iteratorLength(obj, output);
  16762 }
  16763 
  16764 void CodeGenerator::visitLoadIteratorElement(LLoadIteratorElement* lir) {
  16765  Register obj = ToRegister(lir->iter());
  16766  Register output = ToRegister(lir->output());
  16767  if (lir->index()->isConstant()) {
  16768    int32_t index = ToInt32(lir->index());
  16769    masm.iteratorLoadElement(obj, index, output);
  16770  } else {
  16771    Register index = ToRegister(lir->index());
  16772    masm.iteratorLoadElement(obj, index, output);
  16773  }
  16774 }
  16775 
void CodeGenerator::visitIsNoIterAndBranch(LIsNoIterAndBranch* lir) {
  // Branch to |ifTrue| when the input is a magic value (the "no iteration"
  // sentinel); otherwise fall through or jump to |ifFalse|.
  ValueOperand input = ToValue(lir->input());
  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());

  masm.branchTestMagic(Assembler::Equal, input, ifTrue);

  // Skip the jump when the false target is the next block in emission order.
  if (!isNextBlock(lir->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}
  16787 
  16788 void CodeGenerator::visitIteratorEnd(LIteratorEnd* lir) {
  16789  const Register obj = ToRegister(lir->iterator());
  16790  const Register temp0 = ToRegister(lir->temp0());
  16791  const Register temp1 = ToRegister(lir->temp1());
  16792  const Register temp2 = ToRegister(lir->temp2());
  16793 
  16794  masm.iteratorClose(obj, temp0, temp1, temp2);
  16795 }
  16796 
  16797 void CodeGenerator::visitArgumentsLength(LArgumentsLength* lir) {
  16798  // read number of actual arguments from the JS frame.
  16799  Register argc = ToRegister(lir->output());
  16800  masm.loadNumActualArgs(FramePointer, argc);
  16801 }
  16802 
void CodeGenerator::visitGetFrameArgument(LGetFrameArgument* lir) {
  // Load the Value of a frame argument (formal or actual) into the output.
  // The index may be a compile-time constant (static address) or a register
  // (scaled addressing).
  ValueOperand result = ToOutValue(lir);
  const LAllocation* index = lir->index();
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  // This instruction is used to access actual arguments and formal arguments.
  // The number of Values on the stack is |max(numFormals, numActuals)|, so we
  // assert |index < numFormals || index < numActuals| in debug builds.
  DebugOnly<size_t> numFormals = gen->outerInfo().script()->function()->nargs();

  if (index->isConstant()) {
    int32_t i = index->toConstant()->toInt32();
#ifdef DEBUG
    // A constant index below numFormals is statically in bounds; otherwise
    // check it against the runtime actual-argument count.
    if (uint32_t(i) >= numFormals) {
      Label ok;
      Register argc = result.scratchReg();
      masm.loadNumActualArgs(FramePointer, argc);
      masm.branch32(Assembler::Above, argc, Imm32(i), &ok);
      masm.assumeUnreachable("Invalid argument index");
      masm.bind(&ok);
    }
#endif
    Address argPtr(FramePointer, sizeof(Value) * i + argvOffset);
    masm.loadValue(argPtr, result);
  } else {
    Register i = ToRegister(index);
#ifdef DEBUG
    // A dynamic index must be below numFormals or below the runtime
    // actual-argument count.
    Label ok;
    Register argc = result.scratchReg();
    masm.branch32(Assembler::Below, i, Imm32(numFormals), &ok);
    masm.loadNumActualArgs(FramePointer, argc);
    masm.branch32(Assembler::Above, argc, i, &ok);
    masm.assumeUnreachable("Invalid argument index");
    masm.bind(&ok);
#endif
    BaseValueIndex argPtr(FramePointer, i, argvOffset);
    masm.loadValue(argPtr, result);
  }
}
  16842 
void CodeGenerator::visitGetFrameArgumentHole(LGetFrameArgumentHole* lir) {
  // Load frame argument |index| if 0 <= index < length, produce |undefined|
  // if index >= length, and bail out on a negative index. The bounds check
  // is Spectre-hardened via the optional temp register.
  ValueOperand result = ToOutValue(lir);
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());
  size_t argvOffset = JitFrameLayout::offsetOfActualArgs();

  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, length, spectreTemp, &outOfBounds);

  BaseValueIndex argPtr(FramePointer, index, argvOffset);
  masm.loadValue(argPtr, result);
  masm.jump(&done);

  // Out of bounds: negative indices bail out; indices past the end yield
  // |undefined|.
  masm.bind(&outOfBounds);
  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  masm.moveValue(UndefinedValue(), result);

  masm.bind(&done);
}
  16863 
void CodeGenerator::visitRest(LRest* lir) {
  // Materialize the rest parameter: an array of the actual arguments past
  // the formal parameters. Tries an inline nursery allocation with a fixed
  // capacity and inline element-copy loop; falls back to the
  // InitRestParameter VM call when allocation fails or there are more than
  // |arrayCapacity| rest arguments.
  Register numActuals = ToRegister(lir->numActuals());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
  Register temp3 = ToRegister(lir->temp3());
  unsigned numFormals = lir->mir()->numFormals();

  // In baseline, DoRestFallback calls into NewArray to allocate the rest array.
  // If the length is 0, NewArray guesses a good capacity for it. We don't want
  // a smaller capacity in Ion, because that can lead to bailout loops.
  constexpr uint32_t arrayCapacity = 6;
  static_assert(GuessArrayGCKind(0) == GuessArrayGCKind(arrayCapacity));

  if (Shape* shape = lir->mir()->shape()) {
    // Inline-allocate an empty array with |arrayCapacity| fixed element
    // slots into temp2; temp2 becomes nullptr if allocation fails so the
    // VM path below can detect it.
    uint32_t arrayLength = 0;
    gc::AllocKind allocKind = GuessArrayGCKind(arrayCapacity);
    MOZ_ASSERT(gc::GetObjectFinalizeKind(&ArrayObject::class_) ==
               gc::FinalizeKind::None);
    MOZ_ASSERT(!IsFinalizedKind(allocKind));
    MOZ_ASSERT(GetGCKindSlots(allocKind) ==
               arrayCapacity + ObjectElements::VALUES_PER_HEADER);

    Label joinAlloc, failAlloc;
    masm.movePtr(ImmGCPtr(shape), temp0);
    masm.createArrayWithFixedElements(temp2, temp0, temp1, InvalidReg,
                                      arrayLength, arrayCapacity, 0, 0,
                                      allocKind, gc::Heap::Default, &failAlloc);
    masm.jump(&joinAlloc);
    {
      masm.bind(&failAlloc);
      masm.movePtr(ImmPtr(nullptr), temp2);
    }
    masm.bind(&joinAlloc);
  } else {
    // No shape: always take the VM path with a nullptr array.
    masm.movePtr(ImmPtr(nullptr), temp2);
  }

  // Set temp1 to the address of the first actual argument.
  size_t actualsOffset = JitFrameLayout::offsetOfActualArgs();
  masm.computeEffectiveAddress(Address(FramePointer, actualsOffset), temp1);

  // Compute array length: max(numActuals - numFormals, 0).
  Register lengthReg;
  if (numFormals) {
    lengthReg = temp0;
    Label emptyLength, joinLength;
    masm.branch32(Assembler::LessThanOrEqual, numActuals, Imm32(numFormals),
                  &emptyLength);
    {
      masm.move32(numActuals, lengthReg);
      masm.sub32(Imm32(numFormals), lengthReg);

      // Skip formal arguments.
      masm.addPtr(Imm32(sizeof(Value) * numFormals), temp1);

      masm.jump(&joinLength);
    }
    masm.bind(&emptyLength);
    {
      masm.move32(Imm32(0), lengthReg);

      // Leave temp1 pointed to the start of actuals() when the rest-array
      // length is zero. We don't use |actuals() + numFormals| because
      // |numFormals| can be any non-negative int32 value when this MRest was
      // created from scalar replacement optimizations. And it seems
      // questionable to compute a Value* pointer which points to who knows
      // where.
    }
    masm.bind(&joinLength);
  } else {
    // Use numActuals directly when there are no formals.
    lengthReg = numActuals;
  }

  // Try to initialize the array elements.
  Label vmCall, done;
  if (lir->mir()->shape()) {
    // Call into C++ if we failed to allocate an array or there are more than
    // |arrayCapacity| elements.
    masm.branchTestPtr(Assembler::Zero, temp2, temp2, &vmCall);
    masm.branch32(Assembler::Above, lengthReg, Imm32(arrayCapacity), &vmCall);

    // The array must be nursery allocated so no post barrier is needed.
#ifdef DEBUG
    Label ok;
    masm.branchPtrInNurseryChunk(Assembler::Equal, temp2, temp3, &ok);
    masm.assumeUnreachable("Unexpected tenured object for LRest");
    masm.bind(&ok);
#endif

    // Zero rest arguments: the freshly allocated empty array is the result.
    Label nonZeroLength;
    masm.branch32(Assembler::NotEqual, lengthReg, Imm32(0), &nonZeroLength);
    masm.movePtr(temp2, ReturnReg);
    masm.jump(&done);
    masm.bind(&nonZeroLength);

    // Store length and initializedLength.
    Register elements = temp3;
    masm.loadPtr(Address(temp2, NativeObject::offsetOfElements()), elements);
    Address lengthAddr(elements, ObjectElements::offsetOfLength());
    Address initLengthAddr(elements,
                           ObjectElements::offsetOfInitializedLength());
    masm.store32(lengthReg, lengthAddr);
    masm.store32(lengthReg, initLengthAddr);

    masm.push(temp2);  // Spill result to free up register.

    // Copy loop: advance both the source (args) and destination (elements)
    // pointers until the destination reaches |end|.
    Register end = temp0;
    Register args = temp1;
    Register scratch = temp2;
    masm.computeEffectiveAddress(BaseObjectElementIndex(elements, lengthReg),
                                 end);

    Label loop;
    masm.bind(&loop);
    masm.storeValue(Address(args, 0), Address(elements, 0), scratch);
    masm.addPtr(Imm32(sizeof(Value)), args);
    masm.addPtr(Imm32(sizeof(Value)), elements);
    masm.branchPtr(Assembler::Below, elements, end, &loop);

    // Pop result
    masm.pop(ReturnReg);
    masm.jump(&done);
  }

  masm.bind(&vmCall);

  // Fallback: InitRestParameter(cx, length, rest-args pointer, array-or-null).
  pushArg(temp2);
  pushArg(temp1);
  pushArg(lengthReg);

  using Fn =
      ArrayObject* (*)(JSContext*, uint32_t, Value*, Handle<ArrayObject*>);
  callVM<Fn, InitRestParameter>(lir);

  masm.bind(&done);
}
  17002 
  17003 // Create a stackmap from the given safepoint, with the structure:
  17004 //
  17005 //   <reg dump, if any>
  17006 //   |       ++ <body (general spill)>
  17007 //   |       |       ++ <space for Frame>
  17008 //   |       |               ++ <inbound args>
  17009 //   |       |                               |
  17010 //   Lowest Addr                             Highest Addr
  17011 //           |
  17012 //           framePushedAtStackMapBase
  17013 //
  17014 // The caller owns the resulting stackmap.  This assumes a grow-down stack.
  17015 //
  17016 // For non-debug builds, if the stackmap would contain no pointers, no
  17017 // stackmap is created, and nullptr is returned.  For a debug build, a
  17018 // stackmap is always created and returned.
  17019 //
  17020 // Depending on the type of safepoint, the stackmap may need to account for
  17021 // spilled registers. WasmSafepointKind::LirCall corresponds to LIR nodes where
  17022 // isCall() == true, for which the register allocator will spill/restore all
  17023 // live registers at the LIR level - in this case, the LSafepoint sees only live
  17024 // values on the stack, never in registers. WasmSafepointKind::CodegenCall, on
  17025 // the other hand, is for LIR nodes which may manually spill/restore live
  17026 // registers in codegen, in which case the stackmap must account for this. Traps
  17027 // also require tracking of live registers, but spilling is handled by the trap
  17028 // mechanism.
static bool CreateStackMapFromLSafepoint(LSafepoint& safepoint,
                                         const RegisterOffsets& trapExitLayout,
                                         size_t trapExitLayoutNumWords,
                                         size_t nInboundStackArgBytes,
                                         wasm::StackMaps& stackMaps,
                                         wasm::StackMap** result) {
  // Ensure this is defined on all return paths.
  *result = nullptr;

  // The size of the wasm::Frame itself.
  const size_t nFrameBytes = sizeof(wasm::Frame);

  // This is the number of bytes spilled for live registers, outside of a trap.
  // For traps, trapExitLayout and trapExitLayoutNumWords will be used.
  const size_t nRegisterDumpBytes =
      MacroAssembler::PushRegsInMaskSizeInBytes(safepoint.liveRegs());

  // As mentioned above, for WasmSafepointKind::LirCall, register spills and
  // restores are handled at the LIR level and there should therefore be no live
  // registers to handle here.
  MOZ_ASSERT_IF(safepoint.wasmSafepointKind() == WasmSafepointKind::LirCall,
                nRegisterDumpBytes == 0);
  MOZ_ASSERT(nRegisterDumpBytes % sizeof(void*) == 0);

  // This is the number of bytes in the general spill area, below the Frame.
  const size_t nBodyBytes = safepoint.framePushedAtStackMapBase();

  // The stack map owns any alignment padding around inbound stack args.
  const size_t nInboundStackArgBytesAligned =
      wasm::AlignStackArgAreaSize(nInboundStackArgBytes);

  // This is the number of bytes in the general spill area, the Frame, and the
  // incoming args, but not including any register dump area.
  const size_t nNonRegisterBytes =
      nBodyBytes + nFrameBytes + nInboundStackArgBytesAligned;
  MOZ_ASSERT(nNonRegisterBytes % sizeof(void*) == 0);

  // This is the number of bytes in the register dump area, if any, below the
  // general spill area.  For traps, the trap-exit stub's fixed layout is used
  // regardless of what the safepoint says is live.
  const size_t nRegisterBytes =
      (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap)
          ? (trapExitLayoutNumWords * sizeof(void*))
          : nRegisterDumpBytes;

  // This is the total number of bytes covered by the map.
  const size_t nTotalBytes = nNonRegisterBytes + nRegisterBytes;

  // This stackmap/safepoint is for a wasm frame, so there should be no
  // slotsOrElements-style roots.
  MOZ_RELEASE_ASSERT(safepoint.slotsOrElementsSlots().empty());
  MOZ_RELEASE_ASSERT(safepoint.slotsOrElementsRegs().empty());

#ifndef DEBUG
  bool needStackMap = !safepoint.wasmAnyRefRegs().empty() ||
                      !safepoint.wasmAnyRefSlots().empty() ||
                      !safepoint.wasmStructDataRegs().empty() ||
                      !safepoint.wasmStructDataSlots().empty() ||
                      !safepoint.wasmArrayDataRegs().empty() ||
                      !safepoint.wasmArrayDataSlots().empty();
  // There are no references, and this is a non-debug build, so don't bother
  // building the stackmap.
  if (!needStackMap) {
    return true;
  }
#endif

  // All words in the map start out as non-pointer (POD); only the reference
  // slots/registers found below are marked.
  wasm::StackMap* stackMap = stackMaps.create(nTotalBytes / sizeof(void*));
  if (!stackMap) {
    return false;
  }
  if (safepoint.wasmSafepointKind() == WasmSafepointKind::Trap) {
    stackMap->setExitStubWords(trapExitLayoutNumWords);
  }

  // REG DUMP AREA, if any.
  size_t regDumpWords = 0;
  const LiveGeneralRegisterSet wasmAnyRefRegs = safepoint.wasmAnyRefRegs();
  const LiveGeneralRegisterSet wasmStructDataRegs =
      safepoint.wasmStructDataRegs();
  const LiveGeneralRegisterSet wasmArrayDataRegs =
      safepoint.wasmArrayDataRegs();

  // These three sets should be disjoint.  A register can hold at most one
  // kind of reference at a safepoint.
  MOZ_ASSERT(GeneralRegisterSet::Intersect(wasmAnyRefRegs.set(),
                                           wasmStructDataRegs.set())
                 .empty());
  MOZ_ASSERT(GeneralRegisterSet::Intersect(wasmStructDataRegs.set(),
                                           wasmArrayDataRegs.set())
                 .empty());
  MOZ_ASSERT(GeneralRegisterSet::Intersect(wasmArrayDataRegs.set(),
                                           wasmAnyRefRegs.set())
                 .empty());
  const LiveGeneralRegisterSet refRegs(GeneralRegisterSet::Union(
      wasmAnyRefRegs.set(),
      GeneralRegisterSet::Union(wasmStructDataRegs.set(),
                                wasmArrayDataRegs.set())));

  GeneralRegisterForwardIterator refRegsIter(refRegs);
  switch (safepoint.wasmSafepointKind()) {
    case WasmSafepointKind::LirCall:
    case WasmSafepointKind::StackSwitch:
    case WasmSafepointKind::CodegenCall: {
      size_t spilledNumWords = nRegisterDumpBytes / sizeof(void*);
      regDumpWords += spilledNumWords;

      for (; refRegsIter.more(); ++refRegsIter) {
        Register reg = *refRegsIter;
        // offsetOfPushedRegister counts down from the top of the push area;
        // convert to a word index up from the bottom of the map.
        size_t offsetFromSpillBase =
            safepoint.liveRegs().gprs().offsetOfPushedRegister(reg) /
            sizeof(void*);
        MOZ_ASSERT(0 < offsetFromSpillBase &&
                   offsetFromSpillBase <= spilledNumWords);
        size_t index = spilledNumWords - offsetFromSpillBase;

        if (wasmAnyRefRegs.has(reg)) {
          stackMap->set(index, wasm::StackMap::AnyRef);
        } else if (wasmStructDataRegs.has(reg)) {
          stackMap->set(index, wasm::StackMap::StructDataPointer);
        } else {
          MOZ_ASSERT(wasmArrayDataRegs.has(reg));
          stackMap->set(index, wasm::StackMap::ArrayDataPointer);
        }
      }
      // Float and vector registers do not have to be handled; they cannot
      // contain wasm anyrefs, and they are spilled after general-purpose
      // registers. Gprs are therefore closest to the spill base and thus their
      // offset calculation does not need to account for other spills.
    } break;
    case WasmSafepointKind::Trap: {
      regDumpWords += trapExitLayoutNumWords;

      for (; refRegsIter.more(); ++refRegsIter) {
        Register reg = *refRegsIter;
        size_t offsetFromTop = trapExitLayout.getOffset(reg);

        // If this doesn't hold, the associated register wasn't saved by
        // the trap exit stub.  Better to crash now than much later, in
        // some obscure place, and possibly with security consequences.
        MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);

        // offsetFromTop is an offset in words down from the highest
        // address in the exit stub save area.  Switch it around to be an
        // offset up from the bottom of the (integer register) save area.
        size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;

        if (wasmAnyRefRegs.has(reg)) {
          stackMap->set(offsetFromBottom, wasm::StackMap::AnyRef);
        } else if (wasmStructDataRegs.has(reg)) {
          stackMap->set(offsetFromBottom, wasm::StackMap::StructDataPointer);
        } else {
          MOZ_ASSERT(wasmArrayDataRegs.has(reg));
          stackMap->set(offsetFromBottom, wasm::StackMap::ArrayDataPointer);
        }
      }
    } break;
    default:
      MOZ_CRASH("unreachable");
  }

  // Ensure other reg/slot collections on LSafepoint are empty.  Those are
  // JS-frame concepts and must not appear in a wasm safepoint.
  MOZ_ASSERT(safepoint.gcRegs().empty() && safepoint.gcSlots().empty());
#ifdef JS_NUNBOX32
  MOZ_ASSERT(safepoint.nunboxParts().empty());
#elif JS_PUNBOX64
  MOZ_ASSERT(safepoint.valueRegs().empty() && safepoint.valueSlots().empty());
#endif

  // BODY (GENERAL SPILL) AREA and FRAME and INCOMING ARGS
  // Deal with roots on the stack.
  const LSafepoint::SlotList& wasmAnyRefSlots = safepoint.wasmAnyRefSlots();
  for (SafepointSlotEntry wasmAnyRefSlot : wasmAnyRefSlots) {
    // The following needs to correspond with JitFrameLayout::slotRef
    // wasmAnyRefSlot.stack == 0 means the slot is in the args area
    if (wasmAnyRefSlot.stack) {
      // It's a slot in the body allocation, so .slot is interpreted
      // as an index downwards from the Frame*
      MOZ_ASSERT(wasmAnyRefSlot.slot <= nBodyBytes);
      uint32_t offsetInBytes = nBodyBytes - wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                    wasm::StackMap::AnyRef);
    } else {
      // It's an argument slot
      MOZ_ASSERT(wasmAnyRefSlot.slot < nInboundStackArgBytes);
      uint32_t offsetInBytes = nBodyBytes + nFrameBytes + wasmAnyRefSlot.slot;
      MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
      stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                    wasm::StackMap::AnyRef);
    }
  }

  // Track struct data pointers on the stack.  Unlike anyrefs, these are never
  // in the inbound-args area (asserted below).
  for (SafepointSlotEntry slot : safepoint.wasmStructDataSlots()) {
    MOZ_ASSERT(slot.stack);
    // It's a slot in the body allocation, so .slot is interpreted
    // as an index downwards from the Frame*
    MOZ_ASSERT(slot.slot <= nBodyBytes);
    uint32_t offsetInBytes = nBodyBytes - slot.slot;
    MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
    stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                  wasm::StackMap::Kind::StructDataPointer);
  }

  // Track array data pointers on the stack; same layout rules as struct data
  // pointers above.
  for (SafepointSlotEntry slot : safepoint.wasmArrayDataSlots()) {
    MOZ_ASSERT(slot.stack);
    // It's a slot in the body allocation, so .slot is interpreted
    // as an index downwards from the Frame*
    MOZ_ASSERT(slot.slot <= nBodyBytes);
    uint32_t offsetInBytes = nBodyBytes - slot.slot;
    MOZ_ASSERT(offsetInBytes % sizeof(void*) == 0);
    stackMap->set(regDumpWords + offsetInBytes / sizeof(void*),
                  wasm::StackMap::Kind::ArrayDataPointer);
  }

  // Record in the map, how far down from the highest address the Frame* is.
  // Take the opportunity to check that we haven't marked any part of the
  // Frame itself as a pointer.
  stackMap->setFrameOffsetFromTop((nInboundStackArgBytesAligned + nFrameBytes) /
                                  sizeof(void*));
#ifdef DEBUG
  for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
    MOZ_ASSERT(stackMap->get(stackMap->header.numMappedWords -
                             stackMap->header.frameOffsetFromTop + i) ==
               wasm::StackMap::Kind::POD);
  }
#endif

  *result = stackMap;
  return true;
}
  17260 
// Emit the full body of one wasm function: prologue (with optional stack
// overflow and interrupt checks), the compiled LIR body, epilogue, and all
// out-of-line blocks/code.  Afterwards, convert every recorded safepoint into
// a wasm stackmap.  Returns false on OOM or if the frame is implausibly
// large (reported through |decoder|).
bool CodeGenerator::generateWasm(wasm::CallIndirectId callIndirectId,
                                 const wasm::TrapSiteDesc& entryTrapSiteDesc,
                                 const wasm::ArgTypeVector& argTypes,
                                 const RegisterOffsets& trapExitLayout,
                                 size_t trapExitLayoutNumWords,
                                 wasm::FuncOffsets* offsets,
                                 wasm::StackMaps* stackMaps,
                                 wasm::Decoder* decoder) {
  AutoCreatedBy acb(masm, "CodeGenerator::generateWasm");

  JitSpew(JitSpew_Codegen, "# Emitting wasm code");

  // Unaligned size of the incoming stack-arg area; stackmap creation applies
  // its own alignment.
  size_t nInboundStackArgBytes =
      StackArgAreaSizeUnaligned(argTypes, ABIKind::Wasm);
  inboundStackArgBytes_ = nInboundStackArgBytes;

  perfSpewer().markStartOffset(masm.currentOffset());
  perfSpewer().recordOffset(masm, "Prologue");
  wasm::GenerateFunctionPrologue(masm, callIndirectId, mozilla::Nothing(),
                                 offsets);

#ifdef DEBUG
  // If we are doing full debug checks, always load the instance pointer into
  // the usual spot in the frame so that it can be loaded later regardless of
  // what is in InstanceReg. See CodeGenerator::emitDebugResultChecks.
  if (JitOptions.fullDebugChecks) {
    masm.storePtr(InstanceReg,
                  Address(FramePointer,
                          wasm::FrameWithInstances::calleeInstanceOffset()));
  }
#endif

  MOZ_ASSERT(masm.framePushed() == 0);

  // Very large frames are implausible, probably an attack.
  if (frameSize() > wasm::MaxFrameSize) {
    return decoder->fail(decoder->beginOffset(), "stack frame is too large");
  }

  if (omitOverRecursedStackCheck()) {
    masm.reserveStack(frameSize());

    // If we don't need to check the stack for recursion, we definitely don't
    // need to check for interrupts.
    MOZ_ASSERT(omitOverRecursedInterruptCheck());
  } else {
    // Out-of-line path taken when the stack check fails: trap with
    // StackOverflow at the function entry's trap site.
    auto* ool = new (alloc())
        LambdaOutOfLineCode([this, entryTrapSiteDesc](OutOfLineCode& ool) {
          masm.wasmTrap(wasm::Trap::StackOverflow, entryTrapSiteDesc);
          return true;
        });
    addOutOfLineCode(ool, (const BytecodeSite*)nullptr);
    masm.wasmReserveStackChecked(frameSize(), ool->entry());

    if (!omitOverRecursedInterruptCheck()) {
      // The interrupt trap needs a stackmap describing the function-entry
      // state so the trap handler can scan the frame.
      wasm::StackMap* functionEntryStackMap = nullptr;
      if (!CreateStackMapForFunctionEntryTrap(
              argTypes, trapExitLayout, trapExitLayoutNumWords, frameSize(),
              nInboundStackArgBytes, *stackMaps, &functionEntryStackMap)) {
        return false;
      }

      // In debug builds, we'll always have a stack map, even if there are no
      // refs to track.
      MOZ_ASSERT(functionEntryStackMap);

      // NOTE: this inner |ool| intentionally shadows the stack-check one
      // above; it handles the CheckInterrupt trap and then rejoins.
      auto* ool = new (alloc()) LambdaOutOfLineCode(
          [this, stackMaps, functionEntryStackMap](OutOfLineCode& ool) {
            masm.wasmTrap(wasm::Trap::CheckInterrupt, wasm::TrapSiteDesc());
            CodeOffset trapInsnOffset = CodeOffset(masm.currentOffset());

            if (functionEntryStackMap &&
                !stackMaps->add(trapInsnOffset.offset(),
                                functionEntryStackMap)) {
              return false;
            }
            masm.jump(ool.rejoin());
            return true;
          });

      addOutOfLineCode(ool, (const BytecodeSite*)nullptr);
      // Branch to the interrupt handler if the instance's interrupt flag is
      // set.
      masm.branch32(Assembler::NotEqual,
                    Address(InstanceReg, wasm::Instance::offsetOfInterrupt()),
                    Imm32(0), ool->entry());
      masm.bind(ool->rejoin());
    }
  }

  MOZ_ASSERT(masm.framePushed() == frameSize());

  if (!generateBody()) {
    return false;
  }

  perfSpewer().recordOffset(masm, "Epilogue");
  masm.bind(&returnLabel_);
  wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);

  perfSpewer().recordOffset(masm, "OOLBlocks");
  // This must come before we generate OOL code, as OOL blocks may
  // generate OOL code.
  if (!generateOutOfLineBlocks()) {
    return false;
  }

  perfSpewer().recordOffset(masm, "OOLCode");
  if (!generateOutOfLineCode()) {
    return false;
  }

  masm.flush();
  if (masm.oom()) {
    return false;
  }

  offsets->end = masm.currentOffset();

  // Wasm compilation must not have produced any of the Ion-only artifacts
  // (snapshots, recovers, OSI points, ICs, ...).
  MOZ_ASSERT(!masm.failureLabel()->used());
  MOZ_ASSERT(snapshots_.listSize() == 0);
  MOZ_ASSERT(snapshots_.RVATableSize() == 0);
  MOZ_ASSERT(recovers_.size() == 0);
  MOZ_ASSERT(graph.numConstants() == 0);
  MOZ_ASSERT(osiIndices_.empty());
  MOZ_ASSERT(icList_.empty());
  MOZ_ASSERT(safepoints_.size() == 0);
  MOZ_ASSERT(!scriptCounts_);

  // Convert the safepoints to stackmaps and add them to our running
  // collection thereof.
  for (CodegenSafepointIndex& index : safepointIndices_) {
    wasm::StackMap* stackMap = nullptr;
    if (!CreateStackMapFromLSafepoint(
            *index.safepoint(), trapExitLayout, trapExitLayoutNumWords,
            nInboundStackArgBytes, *stackMaps, &stackMap)) {
      return false;
    }

    // In debug builds, we'll always have a stack map.
    MOZ_ASSERT(stackMap);
    if (!stackMap) {
      // Release build with no refs at this safepoint: nothing to record.
      continue;
    }

    if (!stackMaps->finalize(index.displacement(), stackMap)) {
      return false;
    }
  }

  return true;
}
  17411 
// Drive Ion (Warp) code generation for one script: prologue, body, epilogue,
// invalidation epilogue, out-of-line blocks/code, and safepoint encoding.
// Interleaved addNativeToBytecodeEntry calls keep the native=>bytecode map in
// sync at each phase boundary.  Returns false on OOM.
bool CodeGenerator::generate(const WarpSnapshot* snapshot) {
  AutoCreatedBy acb(masm, "CodeGenerator::generate");

  MOZ_ASSERT(snapshot);
  snapshot_ = snapshot;

  JitSpew(JitSpew_Codegen, "# Emitting code for script %s:%u:%u",
          gen->outerInfo().script()->filename(),
          gen->outerInfo().script()->lineno(),
          gen->outerInfo().script()->column().oneOriginValue());

  // Initialize native code table with an entry to the start of
  // top-level script.
  InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
  jsbytecode* startPC = tree->script()->code();
  BytecodeSite* startSite = new (gen->alloc()) BytecodeSite(tree, startPC);
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!safepoints_.init(gen->alloc())) {
    return false;
  }

  // Reserve capacity up-front; the assertion at the end of this function
  // checks we never exceed this bound.
  size_t maxSafepointIndices =
      graph.numSafepoints() + graph.extraSafepointUses();
  if (!safepointIndices_.reserve(maxSafepointIndices)) {
    return false;
  }
  if (!osiIndices_.reserve(graph.numSafepoints())) {
    return false;
  }

  perfSpewer().recordOffset(masm, "Prologue");
  if (!generatePrologue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  if (!generateBody()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer().recordOffset(masm, "Epilogue");
  if (!generateEpilogue()) {
    return false;
  }

  // Reset native => bytecode map table with top-level script and startPc.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  perfSpewer().recordOffset(masm, "InvalidateEpilogue");
  generateInvalidateEpilogue();

  perfSpewer().recordOffset(masm, "OOLBlocks");
  // This must come before we generate OOL code, as OOL blocks may
  // generate OOL code.
  if (!generateOutOfLineBlocks()) {
    return false;
  }

  // native => bytecode entries for OOL code will be added
  // by CodeGeneratorShared::generateOutOfLineCode
  perfSpewer().recordOffset(masm, "OOLCode");
  if (!generateOutOfLineCode()) {
    return false;
  }

  // Add terminal entry.
  if (!addNativeToBytecodeEntry(startSite)) {
    return false;
  }

  // Dump Native to bytecode entries to spew.
  dumpNativeToBytecodeEntries();

  // We encode safepoints after the OSI-point offsets have been determined.
  if (!encodeSafepoints()) {
    return false;
  }

  // If this assertion trips, then you have multiple things to do:
  //
  // This assertion will report if a safepoint is used multiple times for the
  // same instruction. To fix this assertion make sure to call
  // `lirGraph_.addExtraSafepointUses(..);` in the Lowering phase.
  //
  // However, this non-worrying issue might hide a more dramatic security issue,
  // which is that having multiple encoding of a safepoint in a single LIR
  // instruction is not safe, unless:
  //
  //   - The multiple uses of the safepoints are in different code path. i-e
  //     there should be not single execution trace making use of multiple
  //     calls within a single instruction.
  //
  //   - There is enough space to encode data in-place of the call instruction.
  //     Such that a patched-call site does not corrupt the code path on another
  //     execution trace.
  //
  // This issue is caused by the way invalidation works, to keep the code alive
  // when invalidated code is only referenced by the stack. This works by
  // storing data in-place of the calling code, which thus becomes unsafe to
  // execute.
  MOZ_ASSERT(safepointIndices_.length() <= maxSafepointIndices);

  // For each instruction with a safepoint, we have an OSI point inserted after
  // which handles bailouts in case of invalidation of the code.
  MOZ_ASSERT(osiIndices_.length() == graph.numSafepoints());

  return !masm.oom();
}
  17534 
  17535 static bool AddInlinedCompilations(JSContext* cx, HandleScript script,
  17536                                   IonCompilationId compilationId,
  17537                                   const WarpSnapshot* snapshot,
  17538                                   bool* isValid) {
  17539  MOZ_ASSERT(!*isValid);
  17540  IonScriptKey ionScriptKey(script, compilationId);
  17541 
  17542  JitZone* jitZone = cx->zone()->jitZone();
  17543 
  17544  for (const auto* scriptSnapshot : snapshot->scripts()) {
  17545    JSScript* inlinedScript = scriptSnapshot->script();
  17546    if (inlinedScript == script) {
  17547      continue;
  17548    }
  17549 
  17550    // TODO(post-Warp): This matches FinishCompilation and is necessary to
  17551    // ensure in-progress compilations are canceled when an inlined functon
  17552    // becomes a debuggee. See the breakpoint-14.js jit-test.
  17553    // When TI is gone, try to clean this up by moving AddInlinedCompilations to
  17554    // WarpOracle so that we can handle this as part of addPendingRecompile
  17555    // instead of requiring this separate check.
  17556    if (inlinedScript->isDebuggee()) {
  17557      *isValid = false;
  17558      return true;
  17559    }
  17560 
  17561    if (!jitZone->addInlinedCompilation(ionScriptKey, inlinedScript)) {
  17562      return false;
  17563    }
  17564  }
  17565 
  17566  *isValid = true;
  17567  return true;
  17568 }
  17569 
  17570 template <auto FuseMember, CompilationDependency::Type DepType>
  17571 struct RuntimeFuseDependency final : public CompilationDependency {
  17572  explicit RuntimeFuseDependency() : CompilationDependency(DepType) {}
  17573 
  17574  bool registerDependency(JSContext* cx,
  17575                          const IonScriptKey& ionScript) override {
  17576    MOZ_ASSERT(checkDependency(cx));
  17577    return (cx->runtime()->runtimeFuses.ref().*FuseMember)
  17578        .addFuseDependency(cx, ionScript);
  17579  }
  17580 
  17581  CompilationDependency* clone(TempAllocator& alloc) const override {
  17582    return new (alloc.fallible()) RuntimeFuseDependency<FuseMember, DepType>();
  17583  }
  17584 
  17585  bool checkDependency(JSContext* cx) const override {
  17586    return (cx->runtime()->runtimeFuses.ref().*FuseMember).intact();
  17587  }
  17588 
  17589  HashNumber hash() const override { return mozilla::HashGeneric(type); }
  17590 
  17591  bool operator==(const CompilationDependency& dep) const override {
  17592    // Since this dependency is runtime wide, they are all equal.
  17593    return dep.type == type;
  17594  }
  17595 };
  17596 
  17597 bool CodeGenerator::addHasSeenObjectEmulateUndefinedFuseDependency() {
  17598  using Dependency =
  17599      RuntimeFuseDependency<&RuntimeFuses::hasSeenObjectEmulateUndefinedFuse,
  17600                            CompilationDependency::Type::EmulatesUndefined>;
  17601  return mirGen().tracker.addDependency(alloc(), Dependency());
  17602 }
  17603 
  17604 bool CodeGenerator::addHasSeenArrayExceedsInt32LengthFuseDependency() {
  17605  using Dependency = RuntimeFuseDependency<
  17606      &RuntimeFuses::hasSeenArrayExceedsInt32LengthFuse,
  17607      CompilationDependency::Type::ArrayExceedsInt32Length>;
  17608  return mirGen().tracker.addDependency(alloc(), Dependency());
  17609 }
  17610 
  17611 bool CodeGenerator::link(JSContext* cx) {
  17612  AutoCreatedBy acb(masm, "CodeGenerator::link");
  17613 
  17614  // We cancel off-thread Ion compilations in a few places during GC, but if
  17615  // this compilation was performed off-thread it will already have been
  17616  // removed from the relevant lists by this point. Don't allow GC here.
  17617  JS::AutoAssertNoGC nogc(cx);
  17618 
  17619  RootedScript script(cx, gen->outerInfo().script());
  17620  MOZ_ASSERT(!script->hasIonScript());
  17621 
  17622  if (scriptCounts_ && !script->hasScriptCounts() &&
  17623      !script->initScriptCounts(cx)) {
  17624    return false;
  17625  }
  17626 
  17627  // Add all used nursery-values to the Value constant pool that's copied to the
  17628  // IonScript.
  17629  for (NurseryValueLabel& label : nurseryValueLabels_) {
  17630    Value v = snapshot_->nurseryValues()[label.nurseryIndex];
  17631    MOZ_ASSERT(v.isGCThing());
  17632    if (!graph.addConstantToPool(v, &label.constantPoolIndex)) {
  17633      return false;
  17634    }
  17635  }
  17636 
  17637  JitZone* jitZone = cx->zone()->jitZone();
  17638 
  17639  IonCompilationId compilationId =
  17640      cx->runtime()->jitRuntime()->nextCompilationId();
  17641  jitZone->currentCompilationIdRef().emplace(compilationId);
  17642  auto resetCurrentId = mozilla::MakeScopeExit(
  17643      [jitZone] { jitZone->currentCompilationIdRef().reset(); });
  17644 
  // Record constraints. If an error occurred, return false and potentially
  // prevent future compilations. Otherwise, if an invalidation occurred,
  // skip the current compilation.
  17648  bool isValid = false;
  17649 
  17650  // If an inlined script is invalidated (for example, by attaching
  17651  // a debugger), we must also invalidate the parent IonScript.
  17652  if (!AddInlinedCompilations(cx, script, compilationId, snapshot_, &isValid)) {
  17653    return false;
  17654  }
  17655 
  17656  // This compilation is no longer valid; don't proceed, but return true as this
  17657  // isn't an error case either.
  17658  if (!isValid) {
  17659    return true;
  17660  }
  17661 
  17662  CompilationDependencyTracker& tracker = mirGen().tracker;
  17663  // Make sure we're using the same realm as this context.
  17664  MOZ_ASSERT(mirGen().realm->realmPtr() == cx->realm());
  17665  if (!tracker.checkDependencies(cx)) {
  17666    return true;
  17667  }
  17668 
  17669  IonScriptKey ionScriptKey(script, compilationId);
  17670  for (auto r(tracker.dependencies.all()); !r.empty(); r.popFront()) {
  17671    CompilationDependency* dep = r.front();
  17672    if (!dep->registerDependency(cx, ionScriptKey)) {
  17673      return false;
  17674    }
  17675  }
  17676 
  17677  uint32_t argumentSlots = (gen->outerInfo().nargs() + 1) * sizeof(Value);
  17678 
  17679  size_t numNurseryObjects = snapshot_->nurseryObjects().length();
  17680 
  17681  IonScript* ionScript = IonScript::New(
  17682      cx, compilationId, graph.localSlotsSize(), argumentSlots, frameDepth_,
  17683      snapshots_.listSize(), snapshots_.RVATableSize(), recovers_.size(),
  17684      graph.numConstants(), numNurseryObjects, safepointIndices_.length(),
  17685      osiIndices_.length(), icList_.length(), runtimeData_.length(),
  17686      safepoints_.size());
  17687  if (!ionScript) {
  17688    return false;
  17689  }
  17690 #ifdef DEBUG
  17691  ionScript->setICHash(snapshot_->icHash());
  17692 #endif
  17693 
  17694  auto freeIonScript = mozilla::MakeScopeExit([&ionScript] {
  17695    // Use js_free instead of IonScript::Destroy: the cache list is still
  17696    // uninitialized.
  17697    js_free(ionScript);
  17698  });
  17699 
  17700  Linker linker(masm);
  17701  JitCode* code = linker.newCode(cx, CodeKind::Ion);
  17702  if (!code) {
  17703    return false;
  17704  }
  17705 
  17706  // Encode native to bytecode map if profiling is enabled.
  17707  if (isProfilerInstrumentationEnabled()) {
  17708    // Generate native-to-bytecode main table.
  17709    IonEntry::ScriptList scriptList;
  17710    if (!generateCompactNativeToBytecodeMap(cx, code, scriptList)) {
  17711      return false;
  17712    }
  17713 
  17714    // Find the realmId. We do not do cross-realm inlining, so it should be the
  17715    // same for every inlined script.
  17716    uint64_t realmId = script->realm()->creationOptions().profilerRealmID();
  17717 #ifdef DEBUG
  17718    for (const auto* scriptSnapshot : snapshot_->scripts()) {
  17719      JSScript* inlinedScript = scriptSnapshot->script();
  17720      MOZ_ASSERT(inlinedScript->realm()->creationOptions().profilerRealmID() ==
  17721                 realmId);
  17722    }
  17723 #endif
  17724 
  17725    uint8_t* ionTableAddr =
  17726        ((uint8_t*)nativeToBytecodeMap_.get()) + nativeToBytecodeTableOffset_;
  17727    JitcodeIonTable* ionTable = (JitcodeIonTable*)ionTableAddr;
  17728 
  17729    // Construct the IonEntry that will go into the global table.
  17730    auto entry = MakeJitcodeGlobalEntry<IonEntry>(
  17731        cx, code, code->raw(), code->rawEnd(), std::move(scriptList), ionTable,
  17732        realmId);
  17733    if (!entry) {
  17734      return false;
  17735    }
  17736    (void)nativeToBytecodeMap_.release();  // Table is now owned by |entry|.
  17737 
  17738    // Add entry to the global table.
  17739    JitcodeGlobalTable* globalTable =
  17740        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
  17741    if (!globalTable->addEntry(std::move(entry))) {
  17742      return false;
  17743    }
  17744 
  17745    // Mark the jitcode as having a bytecode map.
  17746    code->setHasBytecodeMap();
  17747  } else {
    // Add a dummy jitcodeGlobalTable entry.
  17749    auto entry = MakeJitcodeGlobalEntry<DummyEntry>(cx, code, code->raw(),
  17750                                                    code->rawEnd());
  17751    if (!entry) {
  17752      return false;
  17753    }
  17754 
  17755    // Add entry to the global table.
  17756    JitcodeGlobalTable* globalTable =
  17757        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
  17758    if (!globalTable->addEntry(std::move(entry))) {
  17759      return false;
  17760    }
  17761 
  17762    // Mark the jitcode as having a bytecode map.
  17763    code->setHasBytecodeMap();
  17764  }
  17765 
  17766  ionScript->setMethod(code);
  17767 
  17768  // If the Gecko Profiler is enabled, mark IonScript as having been
  17769  // instrumented accordingly.
  17770  if (isProfilerInstrumentationEnabled()) {
  17771    ionScript->setHasProfilingInstrumentation();
  17772  }
  17773 
  17774  Assembler::PatchDataWithValueCheck(
  17775      CodeLocationLabel(code, invalidateEpilogueData_), ImmPtr(ionScript),
  17776      ImmPtr((void*)-1));
  17777 
  17778  for (CodeOffset offset : ionScriptLabels_) {
  17779    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, offset),
  17780                                       ImmPtr(ionScript), ImmPtr((void*)-1));
  17781  }
  17782 
  17783  for (NurseryObjectLabel label : nurseryObjectLabels_) {
  17784    void* entry = ionScript->addressOfNurseryObject(label.nurseryIndex);
  17785    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
  17786                                       ImmPtr(entry), ImmPtr((void*)-1));
  17787  }
  17788  for (NurseryValueLabel label : nurseryValueLabels_) {
  17789    void* entry = &ionScript->getConstant(label.constantPoolIndex);
  17790    Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, label.offset),
  17791                                       ImmPtr(entry), ImmPtr((void*)-1));
  17792  }
  17793 
  17794  // for generating inline caches during the execution.
  17795  if (runtimeData_.length()) {
  17796    ionScript->copyRuntimeData(&runtimeData_[0]);
  17797  }
  17798  if (icList_.length()) {
  17799    ionScript->copyICEntries(&icList_[0]);
  17800  }
  17801 
  17802  for (size_t i = 0; i < icInfo_.length(); i++) {
  17803    IonIC& ic = ionScript->getICFromIndex(i);
  17804    Assembler::PatchDataWithValueCheck(
  17805        CodeLocationLabel(code, icInfo_[i].icOffsetForJump),
  17806        ImmPtr(ic.codeRawPtr()), ImmPtr((void*)-1));
  17807    Assembler::PatchDataWithValueCheck(
  17808        CodeLocationLabel(code, icInfo_[i].icOffsetForPush), ImmPtr(&ic),
  17809        ImmPtr((void*)-1));
  17810  }
  17811 
  17812  JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)", (void*)ionScript,
  17813          (void*)code->raw());
  17814 
  17815  ionScript->setInvalidationEpilogueDataOffset(
  17816      invalidateEpilogueData_.offset());
  17817  if (jsbytecode* osrPc = gen->outerInfo().osrPc()) {
  17818    ionScript->setOsrPc(osrPc);
  17819    ionScript->setOsrEntryOffset(getOsrEntryOffset());
  17820  }
  17821  ionScript->setInvalidationEpilogueOffset(invalidate_.offset());
  17822 
  17823  perfSpewer().saveJSProfile(cx, script, code);
  17824 
  17825 #ifdef MOZ_VTUNE
  17826  vtune::MarkScript(code, script, "ion");
  17827 #endif
  17828 
  17829  // Set a Ion counter hint for this script.
  17830  if (cx->runtime()->jitRuntime()->hasJitHintsMap()) {
  17831    JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
  17832    jitHints->recordIonCompilation(script);
  17833  }
  17834 
  17835  // for marking during GC.
  17836  if (safepointIndices_.length()) {
  17837    ionScript->copySafepointIndices(&safepointIndices_[0]);
  17838  }
  17839  if (safepoints_.size()) {
  17840    ionScript->copySafepoints(&safepoints_);
  17841  }
  17842 
  17843  // for recovering from an Ion Frame.
  17844  if (osiIndices_.length()) {
  17845    ionScript->copyOsiIndices(&osiIndices_[0]);
  17846  }
  17847  if (snapshots_.listSize()) {
  17848    ionScript->copySnapshots(&snapshots_);
  17849  }
  17850  MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
  17851  if (recovers_.size()) {
  17852    ionScript->copyRecovers(&recovers_);
  17853  }
  17854  if (graph.numConstants()) {
  17855    const Value* vp = graph.constantPool();
  17856    ionScript->copyConstants(vp);
  17857  }
  17858 
  17859  // Attach any generated script counts to the script.
  17860  if (IonScriptCounts* counts = extractScriptCounts()) {
  17861    script->addIonCounts(counts);
  17862  }
  17863  // WARNING: Code after this point must be infallible!
  17864 
  17865  // Copy the list of nursery objects. Note that the store buffer can add
  17866  // HeapPtr edges that must be cleared in IonScript::Destroy. See the
  17867  // infallibility warning above.
  17868  const auto& nurseryObjects = snapshot_->nurseryObjects();
  17869  for (size_t i = 0; i < nurseryObjects.length(); i++) {
  17870    ionScript->nurseryObjects()[i].init(nurseryObjects[i]);
  17871  }
  17872 
  17873  // Transfer ownership of the IonScript to the JitScript. At this point enough
  17874  // of the IonScript must be initialized for IonScript::Destroy to work.
  17875  freeIonScript.release();
  17876  script->jitScript()->setIonScript(script, ionScript);
  17877 
  17878  return true;
  17879 }
  17880 
  17881 void CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint* lir) {
  17882  ValueOperand box = ToValue(lir->input());
  17883  const LDefinition* result = lir->output();
  17884 
  17885  // Out-of-line path to convert int32 to double or bailout
  17886  // if this instruction is fallible.
  17887  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  17888    ValueOperand value = ToValue(lir->input());
  17889 
  17890    if (lir->mir()->fallible()) {
  17891      Label bail;
  17892      masm.branchTestInt32(Assembler::NotEqual, value, &bail);
  17893      bailoutFrom(&bail, lir->snapshot());
  17894    }
  17895    masm.convertInt32ToDouble(value.payloadOrValueReg(),
  17896                              ToFloatRegister(lir->output()));
  17897    masm.jump(ool.rejoin());
  17898  });
  17899  addOutOfLineCode(ool, lir->mir());
  17900 
  17901  FloatRegister resultReg = ToFloatRegister(result);
  17902  masm.branchTestDouble(Assembler::NotEqual, box, ool->entry());
  17903  masm.unboxDouble(box, resultReg);
  17904  masm.bind(ool->rejoin());
  17905 }
  17906 
  17907 void CodeGenerator::visitCallBindVar(LCallBindVar* lir) {
  17908  pushArg(ToRegister(lir->environmentChain()));
  17909 
  17910  using Fn = JSObject* (*)(JSContext*, JSObject*);
  17911  callVM<Fn, BindVarOperation>(lir);
  17912 }
  17913 
// Codegen for a megamorphic `obj[id] = value` store: try the megamorphic
// set-slot cache inline; on a miss fall back to a VM call; on a cache hit
// emit a post-write barrier when a nursery cell was stored into a tenured
// object.
void CodeGenerator::visitMegamorphicSetElement(LMegamorphicSetElement* lir) {
  Register obj = ToRegister(lir->object());
  ValueOperand idVal = ToValue(lir->index());
  ValueOperand value = ToValue(lir->value());

  Register temp0 = ToRegister(lir->temp0());
  // See comment in LIROps.yaml (x86 is short on registers)
#ifndef JS_CODEGEN_X86
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());
#endif

  // The instruction is marked as call-instruction so only these registers are
  // live.
  LiveRegisterSet liveRegs;
  liveRegs.addUnchecked(obj);
  liveRegs.addUnchecked(idVal);
  liveRegs.addUnchecked(value);
  liveRegs.addUnchecked(temp0);
#ifndef JS_CODEGEN_X86
  liveRegs.addUnchecked(temp1);
  liveRegs.addUnchecked(temp2);
#endif

  // Fast path: look up (obj, id) in the megamorphic set-slot cache and store
  // directly on a hit; the lambda emits the pre-barrier for the old value.
  // On x86 a variant taking fewer temps is used (see register note above).
  Label cacheHit, done;
#ifdef JS_CODEGEN_X86
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, value, liveRegs, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#else
  masm.emitMegamorphicCachedSetSlot(
      idVal, obj, temp0, temp1, temp2, value, liveRegs, &cacheHit,
      [](MacroAssembler& masm, const Address& addr, MIRType mirType) {
        EmitPreBarrier(masm, addr, mirType);
      });
#endif

  // Slow path (cache miss): call into the VM. Arguments are pushed in
  // reverse order of the callee's signature.
  pushArg(Imm32(lir->mir()->strict()));
  pushArg(ToValue(lir->value()));
  pushArg(ToValue(lir->index()));
  pushArg(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, js::jit::SetElementMegamorphic<true>>(lir);

  masm.jump(&done);
  masm.bind(&cacheHit);

  // Cache hit: skip the post-write barrier unless a nursery cell was stored
  // into a tenured object.
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);

  // Note: because this is a call-instruction, no registers need to be saved.
  MOZ_ASSERT(lir->isCall());
  emitPostWriteBarrier(obj);

  masm.bind(&done);
}
  17973 
  17974 void CodeGenerator::visitLoadScriptedProxyHandler(
  17975    LLoadScriptedProxyHandler* ins) {
  17976  Register obj = ToRegister(ins->object());
  17977  Register output = ToRegister(ins->output());
  17978 
  17979  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);
  17980 
  17981  Label bail;
  17982  Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
  17983                                  ScriptedProxyHandler::HANDLER_EXTRA));
  17984  masm.fallibleUnboxObject(handlerAddr, output, &bail);
  17985  bailoutFrom(&bail, ins->snapshot());
  17986 }
  17987 
  17988 #ifdef JS_PUNBOX64
  17989 void CodeGenerator::visitCheckScriptedProxyGetResult(
  17990    LCheckScriptedProxyGetResult* ins) {
  17991  ValueOperand target = ToValue(ins->target());
  17992  ValueOperand value = ToValue(ins->value());
  17993  ValueOperand id = ToValue(ins->id());
  17994  Register scratch = ToRegister(ins->temp0());
  17995  Register scratch2 = ToRegister(ins->temp1());
  17996 
  17997  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
  17998                      MutableHandleValue);
  17999  OutOfLineCode* ool = oolCallVM<Fn, CheckProxyGetByValueResult>(
  18000      ins, ArgList(scratch, id, value), StoreValueTo(value));
  18001 
  18002  masm.unboxObject(target, scratch);
  18003  masm.branchTestObjectNeedsProxyResultValidation(Assembler::NonZero, scratch,
  18004                                                  scratch2, ool->entry());
  18005  masm.bind(ool->rejoin());
  18006 }
  18007 #endif
  18008 
  18009 void CodeGenerator::visitIdToStringOrSymbol(LIdToStringOrSymbol* ins) {
  18010  ValueOperand id = ToValue(ins->idVal());
  18011  ValueOperand output = ToOutValue(ins);
  18012  Register scratch = ToRegister(ins->temp0());
  18013 
  18014  masm.moveValue(id, output);
  18015 
  18016  Label done, callVM;
  18017  Label bail;
  18018  {
  18019    ScratchTagScope tag(masm, output);
  18020    masm.splitTagForTest(output, tag);
  18021    masm.branchTestString(Assembler::Equal, tag, &done);
  18022    masm.branchTestSymbol(Assembler::Equal, tag, &done);
  18023    masm.branchTestInt32(Assembler::NotEqual, tag, &bail);
  18024  }
  18025 
  18026  masm.unboxInt32(output, scratch);
  18027 
  18028  using Fn = JSLinearString* (*)(JSContext*, int);
  18029  OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
  18030      ins, ArgList(scratch), StoreRegisterTo(output.scratchReg()));
  18031 
  18032  masm.lookupStaticIntString(scratch, output.scratchReg(),
  18033                             gen->runtime->staticStrings(), ool->entry());
  18034 
  18035  masm.bind(ool->rejoin());
  18036  masm.tagValue(JSVAL_TYPE_STRING, output.scratchReg(), output);
  18037  masm.bind(&done);
  18038 
  18039  bailoutFrom(&bail, ins->snapshot());
  18040 }
  18041 
  18042 void CodeGenerator::visitLoadFixedSlotV(LLoadFixedSlotV* ins) {
  18043  const Register obj = ToRegister(ins->object());
  18044  size_t slot = ins->mir()->slot();
  18045  ValueOperand result = ToOutValue(ins);
  18046 
  18047  masm.loadValue(Address(obj, NativeObject::getFixedSlotOffset(slot)), result);
  18048 }
  18049 
  18050 void CodeGenerator::visitLoadFixedSlotT(LLoadFixedSlotT* ins) {
  18051  const Register obj = ToRegister(ins->object());
  18052  size_t slot = ins->mir()->slot();
  18053  AnyRegister result = ToAnyRegister(ins->output());
  18054  MIRType type = ins->mir()->type();
  18055 
  18056  masm.loadUnboxedValue(Address(obj, NativeObject::getFixedSlotOffset(slot)),
  18057                        type, result);
  18058 }
  18059 
  18060 void CodeGenerator::visitLoadFixedSlotFromOffset(
  18061    LLoadFixedSlotFromOffset* lir) {
  18062  Register obj = ToRegister(lir->object());
  18063  Register offset = ToRegister(lir->offset());
  18064  ValueOperand out = ToOutValue(lir);
  18065 
  18066  // obj[offset]
  18067  masm.loadValue(BaseIndex(obj, offset, TimesOne), out);
  18068 }
  18069 
  18070 void CodeGenerator::visitStoreFixedSlotFromOffsetV(
  18071    LStoreFixedSlotFromOffsetV* lir) {
  18072  Register obj = ToRegister(lir->object());
  18073  Register offset = ToRegister(lir->offset());
  18074  ValueOperand value = ToValue(lir->value());
  18075  Register temp = ToRegister(lir->temp0());
  18076 
  18077  BaseIndex baseIndex(obj, offset, TimesOne);
  18078  masm.computeEffectiveAddress(baseIndex, temp);
  18079 
  18080  Address slot(temp, 0);
  18081  if (lir->mir()->needsBarrier()) {
  18082    emitPreBarrier(slot);
  18083  }
  18084 
  18085  // obj[offset]
  18086  masm.storeValue(value, slot);
  18087 }
  18088 
  18089 void CodeGenerator::visitStoreFixedSlotFromOffsetT(
  18090    LStoreFixedSlotFromOffsetT* lir) {
  18091  Register obj = ToRegister(lir->object());
  18092  Register offset = ToRegister(lir->offset());
  18093  const LAllocation* value = lir->value();
  18094  MIRType valueType = lir->mir()->value()->type();
  18095  Register temp = ToRegister(lir->temp0());
  18096 
  18097  BaseIndex baseIndex(obj, offset, TimesOne);
  18098  masm.computeEffectiveAddress(baseIndex, temp);
  18099 
  18100  Address slot(temp, 0);
  18101  if (lir->mir()->needsBarrier()) {
  18102    emitPreBarrier(slot);
  18103  }
  18104 
  18105  // obj[offset]
  18106  ConstantOrRegister nvalue =
  18107      value->isConstant()
  18108          ? ConstantOrRegister(value->toConstant()->toJSValue())
  18109          : TypedOrValueRegister(valueType, ToAnyRegister(value));
  18110  masm.storeConstantOrRegister(nvalue, slot);
  18111 }
  18112 
// Load a Value from |src| and unbox it into |dest| according to |type|.
// For MIRType::Double the boxed value is first loaded into |temp| and run
// through ensureDouble, which also accepts int32 payloads and branches to
// |fail| otherwise. For other types, a fallible unbox branches to |fail| on
// a tag mismatch, while an infallible unbox assumes the tag matches.
template <typename T>
static void EmitLoadAndUnbox(MacroAssembler& masm, const T& src, MIRType type,
                             bool fallible, AnyRegister dest, Register64 temp,
                             Label* fail) {
  // Only the double path needs the 64-bit temp.
  MOZ_ASSERT_IF(type == MIRType::Double, temp != Register64::Invalid());
  if (type == MIRType::Double) {
    MOZ_ASSERT(dest.isFloat());
    // Reinterpret the 64-bit temp as a ValueOperand to hold the boxed load.
#if defined(JS_NUNBOX32)
    auto tempVal = ValueOperand(temp.high, temp.low);
#else
    auto tempVal = ValueOperand(temp.reg);
#endif
    masm.loadValue(src, tempVal);
    masm.ensureDouble(tempVal, dest.fpu(), fail);
    return;
  }
  if (fallible) {
    switch (type) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(src, dest.gpr(), fail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(src, dest.gpr(), fail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(src, dest.gpr(), fail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(src, dest.gpr(), fail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(src, dest.gpr(), fail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(src, dest.gpr(), fail);
        break;
      default:
        MOZ_CRASH("Unexpected MIRType");
    }
    return;
  }
  // Infallible: the source is statically known to hold |type|.
  masm.loadUnboxedValue(src, type, dest);
}
  18156 
  18157 void CodeGenerator::visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* ins) {
  18158  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  18159  MIRType type = mir->type();
  18160  Register input = ToRegister(ins->object());
  18161  AnyRegister result = ToAnyRegister(ins->output());
  18162  Register64 maybeTemp = ToTempRegister64OrInvalid(ins->temp0());
  18163  size_t slot = mir->slot();
  18164 
  18165  Address address(input, NativeObject::getFixedSlotOffset(slot));
  18166 
  18167  Label bail;
  18168  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, maybeTemp,
  18169                   &bail);
  18170  if (mir->fallible()) {
  18171    bailoutFrom(&bail, ins->snapshot());
  18172  }
  18173 }
  18174 
  18175 void CodeGenerator::visitLoadDynamicSlotAndUnbox(
  18176    LLoadDynamicSlotAndUnbox* ins) {
  18177  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  18178  MIRType type = mir->type();
  18179  Register input = ToRegister(ins->slots());
  18180  AnyRegister result = ToAnyRegister(ins->output());
  18181  Register64 maybeTemp = ToTempRegister64OrInvalid(ins->temp0());
  18182  size_t slot = mir->slot();
  18183 
  18184  Address address(input, slot * sizeof(JS::Value));
  18185 
  18186  Label bail;
  18187  EmitLoadAndUnbox(masm, address, type, mir->fallible(), result, maybeTemp,
  18188                   &bail);
  18189  if (mir->fallible()) {
  18190    bailoutFrom(&bail, ins->snapshot());
  18191  }
  18192 }
  18193 
  18194 void CodeGenerator::visitLoadElementAndUnbox(LLoadElementAndUnbox* ins) {
  18195  const MLoadElementAndUnbox* mir = ins->mir();
  18196  MIRType type = mir->type();
  18197  Register elements = ToRegister(ins->elements());
  18198  AnyRegister result = ToAnyRegister(ins->output());
  18199  Register64 maybeTemp = ToTempRegister64OrInvalid(ins->temp0());
  18200 
  18201  auto source = ToAddressOrBaseObjectElementIndex(elements, ins->index());
  18202 
  18203  Label bail;
  18204  source.match([&](const auto& source) {
  18205    EmitLoadAndUnbox(masm, source, type, mir->fallible(), result, maybeTemp,
  18206                     &bail);
  18207  });
  18208 
  18209  if (mir->fallible()) {
  18210    bailoutFrom(&bail, ins->snapshot());
  18211  }
  18212 }
  18213 
// Ensure the string in |stringReg| (loaded from |slotAddr|) is an atom.
// Fast paths handle strings that already are atoms or that are atom-refs;
// any other string takes an out-of-line VM call that atomizes it and writes
// the atom back into the slot (and into |dest|).
void CodeGenerator::emitMaybeAtomizeSlot(LInstruction* ins, Register stringReg,
                                         Address slotAddr,
                                         TypedOrValueRegister dest) {
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    // This code is called with a non-atomic string in |stringReg|.
    // When it returns, |stringReg| contains an unboxed pointer to an
    // atomized version of that string, and |slotAddr| contains a
    // StringValue pointing to that atom. If |dest| is a ValueOperand,
    // it contains the same StringValue; otherwise we assert that |dest|
    // is |stringReg|.

    saveLive(ins);
    pushArg(stringReg);

    using Fn = JSAtom* (*)(JSContext*, JSString*);
    callVM<Fn, js::AtomizeString>(ins);
    StoreRegisterTo(stringReg).generate(this);
    restoreLiveIgnore(ins, StoreRegisterTo(stringReg).clobbered());

    if (dest.hasValue()) {
      masm.moveValue(
          TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
          dest.valueReg());
    } else {
      MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
    }

    // Write the atom back into the slot, pre-barriering the old slot value.
    emitPreBarrier(slotAddr);
    masm.storeTypedOrValue(dest, slotAddr);

    // We don't need a post-barrier because atoms aren't nursery-allocated.
#ifdef DEBUG
    // We need a temp register for the nursery check. Spill something.
    AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    allRegs.take(stringReg);
    Register temp = allRegs.takeAny();
    masm.push(temp);

    Label tenured;
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, stringReg, temp,
                                 &tenured);
    masm.assumeUnreachable("AtomizeString returned a nursery pointer");
    masm.bind(&tenured);

    masm.pop(temp);
#endif

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, ins->mirRaw()->toInstruction());
  // Fast path 1: the string is already an atom; nothing to do.
  masm.branchTest32(Assembler::NonZero,
                    Address(stringReg, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), ool->rejoin());

  // Fast path 2: an atom-ref string points at its underlying atom; load it.
  // Any other non-atom string goes to the out-of-line atomization path.
  masm.branchTest32(Assembler::Zero,
                    Address(stringReg, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_REF_BIT), ool->entry());
  masm.loadPtr(Address(stringReg, JSAtomRefString::offsetOfAtom()), stringReg);

  if (dest.hasValue()) {
    masm.moveValue(
        TypedOrValueRegister(MIRType::String, AnyRegister(stringReg)),
        dest.valueReg());
  } else {
    MOZ_ASSERT(dest.typedReg().gpr() == stringReg);
  }

  // Store the underlying atom back into the slot (pre-barrier the old value).
  emitPreBarrier(slotAddr);
  masm.storeTypedOrValue(dest, slotAddr);

  masm.bind(ool->rejoin());
}
  18286 
  18287 void CodeGenerator::visitLoadFixedSlotAndAtomize(
  18288    LLoadFixedSlotAndAtomize* ins) {
  18289  Register obj = ToRegister(ins->object());
  18290  Register temp = ToRegister(ins->temp0());
  18291  size_t slot = ins->mir()->slot();
  18292  ValueOperand result = ToOutValue(ins);
  18293 
  18294  Address slotAddr(obj, NativeObject::getFixedSlotOffset(slot));
  18295  masm.loadValue(slotAddr, result);
  18296 
  18297  Label notString;
  18298  masm.branchTestString(Assembler::NotEqual, result, &notString);
  18299  masm.unboxString(result, temp);
  18300  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  18301  masm.bind(&notString);
  18302 }
  18303 
  18304 void CodeGenerator::visitLoadDynamicSlotAndAtomize(
  18305    LLoadDynamicSlotAndAtomize* ins) {
  18306  ValueOperand result = ToOutValue(ins);
  18307  Register temp = ToRegister(ins->temp0());
  18308  Register base = ToRegister(ins->input());
  18309  int32_t offset = ins->mir()->slot() * sizeof(js::Value);
  18310 
  18311  Address slotAddr(base, offset);
  18312  masm.loadValue(slotAddr, result);
  18313 
  18314  Label notString;
  18315  masm.branchTestString(Assembler::NotEqual, result, &notString);
  18316  masm.unboxString(result, temp);
  18317  emitMaybeAtomizeSlot(ins, temp, slotAddr, result);
  18318  masm.bind(&notString);
  18319 }
  18320 
  18321 void CodeGenerator::visitLoadFixedSlotUnboxAndAtomize(
  18322    LLoadFixedSlotUnboxAndAtomize* ins) {
  18323  const MLoadFixedSlotAndUnbox* mir = ins->mir();
  18324  MOZ_ASSERT(mir->type() == MIRType::String);
  18325  Register input = ToRegister(ins->object());
  18326  AnyRegister result = ToAnyRegister(ins->output());
  18327  size_t slot = mir->slot();
  18328 
  18329  Address slotAddr(input, NativeObject::getFixedSlotOffset(slot));
  18330 
  18331  Label bail;
  18332  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
  18333                   Register64::Invalid(), &bail);
  18334  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
  18335                       TypedOrValueRegister(MIRType::String, result));
  18336 
  18337  if (mir->fallible()) {
  18338    bailoutFrom(&bail, ins->snapshot());
  18339  }
  18340 }
  18341 
  18342 void CodeGenerator::visitLoadDynamicSlotUnboxAndAtomize(
  18343    LLoadDynamicSlotUnboxAndAtomize* ins) {
  18344  const MLoadDynamicSlotAndUnbox* mir = ins->mir();
  18345  MOZ_ASSERT(mir->type() == MIRType::String);
  18346  Register input = ToRegister(ins->slots());
  18347  AnyRegister result = ToAnyRegister(ins->output());
  18348  size_t slot = mir->slot();
  18349 
  18350  Address slotAddr(input, slot * sizeof(JS::Value));
  18351 
  18352  Label bail;
  18353  EmitLoadAndUnbox(masm, slotAddr, MIRType::String, mir->fallible(), result,
  18354                   Register64::Invalid(), &bail);
  18355  emitMaybeAtomizeSlot(ins, result.gpr(), slotAddr,
  18356                       TypedOrValueRegister(MIRType::String, result));
  18357 
  18358  if (mir->fallible()) {
  18359    bailoutFrom(&bail, ins->snapshot());
  18360  }
  18361 }
  18362 
  18363 void CodeGenerator::visitAddAndStoreSlot(LAddAndStoreSlot* ins) {
  18364  MOZ_ASSERT(!ins->mir()->preserveWrapper());
  18365 
  18366  Register obj = ToRegister(ins->object());
  18367  ValueOperand value = ToValue(ins->value());
  18368  Register maybeTemp = ToTempRegisterOrInvalid(ins->temp0());
  18369 
  18370  Shape* shape = ins->mir()->shape();
  18371  masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
  18372    EmitPreBarrier(masm, addr, MIRType::Shape);
  18373  });
  18374 
  18375  // Perform the store. No pre-barrier required since this is a new
  18376  // initialization.
  18377 
  18378  uint32_t offset = ins->mir()->slotOffset();
  18379  if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
  18380    Address slot(obj, offset);
  18381    masm.storeValue(value, slot);
  18382  } else {
  18383    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), maybeTemp);
  18384    Address slot(maybeTemp, offset);
  18385    masm.storeValue(value, slot);
  18386  }
  18387 }
  18388 
  18389 void CodeGenerator::visitAddAndStoreSlotPreserveWrapper(
  18390    LAddAndStoreSlotPreserveWrapper* ins) {
  18391  MOZ_ASSERT(ins->mir()->preserveWrapper());
  18392 
  18393  Register obj = ToRegister(ins->object());
  18394  ValueOperand value = ToValue(ins->value());
  18395  Register temp0 = ToTempRegisterOrInvalid(ins->temp0());
  18396  Register temp1 = ToTempRegisterOrInvalid(ins->temp1());
  18397 
  18398  LiveRegisterSet liveRegs = liveVolatileRegs(ins);
  18399  liveRegs.takeUnchecked(temp0);
  18400  liveRegs.takeUnchecked(temp1);
  18401  masm.preserveWrapper(obj, temp0, temp1, liveRegs);
  18402  bailoutIfFalseBool(temp0, ins->snapshot());
  18403 
  18404  Shape* shape = ins->mir()->shape();
  18405  masm.storeObjShape(shape, obj, [](MacroAssembler& masm, const Address& addr) {
  18406    EmitPreBarrier(masm, addr, MIRType::Shape);
  18407  });
  18408 
  18409  // Perform the store. No pre-barrier required since this is a new
  18410  // initialization.
  18411 
  18412  uint32_t offset = ins->mir()->slotOffset();
  18413  if (ins->mir()->kind() == MAddAndStoreSlot::Kind::FixedSlot) {
  18414    Address slot(obj, offset);
  18415    masm.storeValue(value, slot);
  18416  } else {
  18417    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
  18418    Address slot(temp0, offset);
  18419    masm.storeValue(value, slot);
  18420  }
  18421 }
  18422 
// Grow the object's dynamic slots to make room for new properties, then set
// the new shape and initialize the new slot. Bails out when wrapper
// preservation or the slot growth call fails.
void CodeGenerator::visitAllocateAndStoreSlot(LAllocateAndStoreSlot* ins) {
  Register obj = ToRegister(ins->object());
  ValueOperand value = ToValue(ins->value());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());

  if (ins->mir()->preserveWrapper()) {
    // preserveWrapper reports success via |temp0|; bail out on failure.
    LiveRegisterSet liveRegs;
    liveRegs.addUnchecked(obj);
    liveRegs.addUnchecked(value);
    masm.preserveWrapper(obj, temp0, temp1, liveRegs);
    bailoutIfFalseBool(temp0, ins->snapshot());
  }

  // Save |obj| and |value| across the ABI call.
  masm.Push(obj);
  masm.Push(value);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(obj);
  masm.move32(Imm32(ins->mir()->numNewSlots()), temp1);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, NativeObject::growSlotsPure>();
  masm.storeCallPointerResult(temp0);

  masm.Pop(value);
  masm.Pop(obj);

  // growSlotsPure returns a bool; bail out if it failed.
  bailoutIfFalseBool(temp0, ins->snapshot());

  // Write the new shape, pre-barriering the old shape.
  masm.storeObjShape(ins->mir()->shape(), obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), temp0);
  Address slot(temp0, ins->mir()->slotOffset());
  masm.storeValue(value, slot);
}
  18466 
  18467 void CodeGenerator::visitAddSlotAndCallAddPropHook(
  18468    LAddSlotAndCallAddPropHook* ins) {
  18469  Register obj = ToRegister(ins->object());
  18470  ValueOperand value = ToValue(ins->value());
  18471 
  18472  pushArg(ImmGCPtr(ins->mir()->shape()));
  18473  pushArg(value);
  18474  pushArg(obj);
  18475 
  18476  using Fn =
  18477      bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
  18478  callVM<Fn, AddSlotAndCallAddPropHook>(ins);
  18479 }
  18480 
  18481 void CodeGenerator::visitStoreFixedSlotV(LStoreFixedSlotV* ins) {
  18482  Register obj = ToRegister(ins->obj());
  18483  size_t slot = ins->mir()->slot();
  18484 
  18485  ValueOperand value = ToValue(ins->value());
  18486 
  18487  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  18488  if (ins->mir()->needsBarrier()) {
  18489    emitPreBarrier(address);
  18490  }
  18491 
  18492  masm.storeValue(value, address);
  18493 }
  18494 
  18495 void CodeGenerator::visitStoreFixedSlotT(LStoreFixedSlotT* ins) {
  18496  const Register obj = ToRegister(ins->obj());
  18497  size_t slot = ins->mir()->slot();
  18498 
  18499  const LAllocation* value = ins->value();
  18500  MIRType valueType = ins->mir()->value()->type();
  18501 
  18502  Address address(obj, NativeObject::getFixedSlotOffset(slot));
  18503  if (ins->mir()->needsBarrier()) {
  18504    emitPreBarrier(address);
  18505  }
  18506 
  18507  ConstantOrRegister nvalue =
  18508      value->isConstant()
  18509          ? ConstantOrRegister(value->toConstant()->toJSValue())
  18510          : TypedOrValueRegister(valueType, ToAnyRegister(value));
  18511  masm.storeConstantOrRegister(nvalue, address);
  18512 }
  18513 
  18514 void CodeGenerator::visitGetNameCache(LGetNameCache* ins) {
  18515  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  18516  Register envChain = ToRegister(ins->envObj());
  18517  ValueOperand output = ToOutValue(ins);
  18518  Register temp = ToRegister(ins->temp0());
  18519 
  18520  IonGetNameIC ic(liveRegs, envChain, output, temp);
  18521  addIC(ins, allocateIC(ic));
  18522 }
  18523 
  18524 static bool IsConstantNonIndexString(const ConstantOrRegister& id) {
  18525  if (!id.constant() || !id.value().isString()) {
  18526    return false;
  18527  }
  18528  return !id.value().toString()->asOffThreadAtom().isIndex();
  18529 }
  18530 
  18531 void CodeGenerator::addGetPropertyCache(LInstruction* ins,
  18532                                        LiveRegisterSet liveRegs,
  18533                                        TypedOrValueRegister value,
  18534                                        const ConstantOrRegister& id,
  18535                                        ValueOperand output) {
  18536  CacheKind kind = CacheKind::GetElem;
  18537  if (IsConstantNonIndexString(id)) {
  18538    kind = CacheKind::GetProp;
  18539  }
  18540  IonGetPropertyIC cache(kind, liveRegs, value, id, output);
  18541  addIC(ins, allocateIC(cache));
  18542 }
  18543 
  18544 void CodeGenerator::addSetPropertyCache(LInstruction* ins,
  18545                                        LiveRegisterSet liveRegs,
  18546                                        Register objReg, Register temp,
  18547                                        const ConstantOrRegister& id,
  18548                                        const ConstantOrRegister& value,
  18549                                        bool strict) {
  18550  CacheKind kind = CacheKind::SetElem;
  18551  if (IsConstantNonIndexString(id)) {
  18552    kind = CacheKind::SetProp;
  18553  }
  18554  IonSetPropertyIC cache(kind, liveRegs, objReg, temp, id, value, strict);
  18555  addIC(ins, allocateIC(cache));
  18556 }
  18557 
  18558 ConstantOrRegister CodeGenerator::toConstantOrRegister(LInstruction* lir,
  18559                                                       size_t n, MIRType type) {
  18560  if (type == MIRType::Value) {
  18561    return TypedOrValueRegister(ToValue(lir->getBoxOperand(n)));
  18562  }
  18563 
  18564  const LAllocation* value = lir->getOperand(n);
  18565  if (value->isConstant()) {
  18566    return ConstantOrRegister(value->toConstant()->toJSValue());
  18567  }
  18568 
  18569  return TypedOrValueRegister(type, ToAnyRegister(value));
  18570 }
  18571 
  18572 void CodeGenerator::visitGetPropertyCache(LGetPropertyCache* ins) {
  18573  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  18574  TypedOrValueRegister value =
  18575      toConstantOrRegister(ins, LGetPropertyCache::ValueIndex,
  18576                           ins->mir()->value()->type())
  18577          .reg();
  18578  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropertyCache::IdIndex,
  18579                                               ins->mir()->idval()->type());
  18580  ValueOperand output = ToOutValue(ins);
  18581  addGetPropertyCache(ins, liveRegs, value, id, output);
  18582 }
  18583 
  18584 void CodeGenerator::visitGetPropSuperCache(LGetPropSuperCache* ins) {
  18585  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  18586  Register obj = ToRegister(ins->obj());
  18587  TypedOrValueRegister receiver =
  18588      toConstantOrRegister(ins, LGetPropSuperCache::ReceiverIndex,
  18589                           ins->mir()->receiver()->type())
  18590          .reg();
  18591  ConstantOrRegister id = toConstantOrRegister(ins, LGetPropSuperCache::IdIndex,
  18592                                               ins->mir()->idval()->type());
  18593  ValueOperand output = ToOutValue(ins);
  18594 
  18595  CacheKind kind = CacheKind::GetElemSuper;
  18596  if (IsConstantNonIndexString(id)) {
  18597    kind = CacheKind::GetPropSuper;
  18598  }
  18599 
  18600  IonGetPropSuperIC cache(kind, liveRegs, obj, receiver, id, output);
  18601  addIC(ins, allocateIC(cache));
  18602 }
  18603 
  18604 void CodeGenerator::visitBindNameCache(LBindNameCache* ins) {
  18605  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  18606  Register envChain = ToRegister(ins->environmentChain());
  18607  Register output = ToRegister(ins->output());
  18608  Register temp = ToRegister(ins->temp0());
  18609 
  18610  IonBindNameIC ic(liveRegs, envChain, output, temp);
  18611  addIC(ins, allocateIC(ic));
  18612 }
  18613 
  18614 void CodeGenerator::visitHasOwnCache(LHasOwnCache* ins) {
  18615  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  18616  TypedOrValueRegister value =
  18617      toConstantOrRegister(ins, LHasOwnCache::ValueIndex,
  18618                           ins->mir()->value()->type())
  18619          .reg();
  18620  TypedOrValueRegister id = toConstantOrRegister(ins, LHasOwnCache::IdIndex,
  18621                                                 ins->mir()->idval()->type())
  18622                                .reg();
  18623  Register output = ToRegister(ins->output());
  18624 
  18625  IonHasOwnIC cache(liveRegs, value, id, output);
  18626  addIC(ins, allocateIC(cache));
  18627 }
  18628 
  18629 void CodeGenerator::visitCheckPrivateFieldCache(LCheckPrivateFieldCache* ins) {
  18630  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  18631  TypedOrValueRegister value =
  18632      toConstantOrRegister(ins, LCheckPrivateFieldCache::ValueIndex,
  18633                           ins->mir()->value()->type())
  18634          .reg();
  18635  TypedOrValueRegister id =
  18636      toConstantOrRegister(ins, LCheckPrivateFieldCache::IdIndex,
  18637                           ins->mir()->idval()->type())
  18638          .reg();
  18639  Register output = ToRegister(ins->output());
  18640 
  18641  IonCheckPrivateFieldIC cache(liveRegs, value, id, output);
  18642  addIC(ins, allocateIC(cache));
  18643 }
  18644 
  18645 void CodeGenerator::visitNewPrivateName(LNewPrivateName* ins) {
  18646  pushArg(ImmGCPtr(ins->mir()->name()));
  18647 
  18648  using Fn = JS::Symbol* (*)(JSContext*, Handle<JSAtom*>);
  18649  callVM<Fn, NewPrivateName>(ins);
  18650 }
  18651 
  18652 void CodeGenerator::visitDeleteProperty(LDeleteProperty* lir) {
  18653  pushArg(ImmGCPtr(lir->mir()->name()));
  18654  pushArg(ToValue(lir->value()));
  18655 
  18656  using Fn = bool (*)(JSContext*, HandleValue, Handle<PropertyName*>, bool*);
  18657  if (lir->mir()->strict()) {
  18658    callVM<Fn, DelPropOperation<true>>(lir);
  18659  } else {
  18660    callVM<Fn, DelPropOperation<false>>(lir);
  18661  }
  18662 }
  18663 
  18664 void CodeGenerator::visitDeleteElement(LDeleteElement* lir) {
  18665  pushArg(ToValue(lir->index()));
  18666  pushArg(ToValue(lir->value()));
  18667 
  18668  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
  18669  if (lir->mir()->strict()) {
  18670    callVM<Fn, DelElemOperation<true>>(lir);
  18671  } else {
  18672    callVM<Fn, DelElemOperation<false>>(lir);
  18673  }
  18674 }
  18675 
// Emit the fast path for obtaining a PropertyIteratorObject for |obj|
// (for-in iteration). The fast path tries to reuse a cached iterator found
// via the object's shape; all misses fall back to an out-of-line VM call.
// Which VM function is used depends on whether the MIR node wants iterator
// indices and whether iterator registration can be skipped.
void CodeGenerator::visitObjectToIterator(LObjectToIterator* lir) {
  Register obj = ToRegister(lir->object());
  Register iterObj = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());
  Register temp2 = ToRegister(lir->temp1());
  Register temp3 = ToRegister(lir->temp2());

  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
  OutOfLineCode* ool = nullptr;

  // Select the VM fallback matching the (skipRegistration, wantsIndices)
  // combination. All four variants store the resulting iterator in iterObj.
  if (lir->mir()->skipRegistration()) {
    if (lir->mir()->wantsIndices()) {
      ool = oolCallVM<Fn, GetIteratorWithIndicesForObjectKeys>(
          lir, ArgList(obj), StoreRegisterTo(iterObj));
    } else {
      ool = oolCallVM<Fn, GetIteratorForObjectKeys>(lir, ArgList(obj),
                                                    StoreRegisterTo(iterObj));
    }
  } else {
    if (lir->mir()->wantsIndices()) {
      ool = oolCallVM<Fn, GetIteratorWithIndices>(lir, ArgList(obj),
                                                  StoreRegisterTo(iterObj));
    } else {
      ool = oolCallVM<Fn, GetIterator>(lir, ArgList(obj),
                                       StoreRegisterTo(iterObj));
    }
  }

#ifdef DEBUG
  // Sanity check: a no-store (no side effect) ObjectToIterator must have
  // skipped registration and must not be operating on a proxy.
  if (!lir->mir()->getAliasSet().isStore()) {
    MOZ_ASSERT(lir->mir()->skipRegistration());
    Label done;
    masm.branchTestObjectIsProxy(false, obj, temp, &done);
    masm.assumeUnreachable("ObjectToIterator on a proxy must be a store.");
    masm.bind(&done);
  }
#endif

  // Try to fetch a cached iterator via the object's shape; on failure jump
  // to the VM fallback.
  masm.maybeLoadIteratorFromShape(obj, iterObj, temp, temp2, temp3,
                                  ool->entry(),
                                  !lir->mir()->skipRegistration());

  // Load the NativeIterator* out of the iterator object's private slot.
  Register nativeIter = temp;
  masm.loadPrivate(
      Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
      nativeIter);

  Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlags());
  if (lir->mir()->wantsIndices()) {
    // At least one consumer of the output of this iterator has been optimized
    // to use iterator indices. If the cached iterator doesn't include indices,
    // but it was marked to indicate that we can create them if needed, then we
    // do a VM call to replace the cached iterator with a fresh iterator
    // including indices.
    masm.branchTest32(Assembler::NonZero, iterFlagsAddr,
                      Imm32(NativeIterator::Flags::IndicesSupported),
                      ool->entry());
  }

  if (!lir->mir()->skipRegistration()) {
    // Activate the iterator: record the iterated object, set the Active
    // flag, and link the iterator into the compartment's enumerators list.
    masm.storePtr(obj, Address(nativeIter,
                               NativeIterator::offsetOfObjectBeingIterated()));
    masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

    Register enumeratorsAddr = temp2;
    masm.movePtr(ImmPtr(lir->mir()->enumeratorsAddr()), enumeratorsAddr);
    masm.registerIterator(enumeratorsAddr, nativeIter, temp3);

    // Generate post-write barrier for storing to
    // |iterObj->objectBeingIterated_|. We already know that |iterObj| is
    // tenured, so we only have to check |obj|.
    Label skipBarrier;
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, obj, temp2, &skipBarrier);
    {
      // Preserve the volatile registers across the barrier call, except the
      // temps (dead here) — but keep iterObj live if it is volatile.
      LiveRegisterSet save = liveVolatileRegs(lir);
      save.takeUnchecked(temp);
      save.takeUnchecked(temp2);
      save.takeUnchecked(temp3);
      if (iterObj.volatile_()) {
        save.addUnchecked(iterObj);
      }

      masm.PushRegsInMask(save);
      emitPostWriteBarrier(iterObj);
      masm.PopRegsInMask(save);
    }
    masm.bind(&skipBarrier);
  }

  masm.bind(ool->rejoin());
}
  18767 
  18768 void CodeGenerator::visitValueToIterator(LValueToIterator* lir) {
  18769  pushArg(ToValue(lir->value()));
  18770 
  18771  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
  18772  callVM<Fn, ValueToIterator>(lir);
  18773 }
  18774 
// Branch to |ifFalse| unless |iterator| (a PropertyIteratorObject) has
// iterator indices available and the object shape recorded in the iterator
// still matches the current shape of |object|. On fall-through the caller
// may rely on indices being usable. |temp| and |temp2| are used as
// scratch registers and are clobbered.
void CodeGenerator::emitIteratorHasIndicesAndBranch(Register iterator,
                                                    Register object,
                                                    Register temp,
                                                    Register temp2,
                                                    Label* ifFalse) {
  // Check that the iterator has indices available.
  Address nativeIterAddr(iterator,
                         PropertyIteratorObject::offsetOfIteratorSlot());
  masm.loadPrivate(nativeIterAddr, temp);
  masm.branchTest32(Assembler::Zero,
                    Address(temp, NativeIterator::offsetOfFlags()),
                    Imm32(NativeIterator::Flags::IndicesAvailable), ifFalse);

  // Guard that the first shape stored in the iterator matches the current
  // shape of the iterated object.
  Address objShapeAddr(temp, NativeIterator::offsetOfObjectShape());
  masm.loadPtr(objShapeAddr, temp);
  masm.branchTestObjShape(Assembler::NotEqual, object, temp, temp2, object,
                          ifFalse);
}
  18795 
  18796 void CodeGenerator::visitIteratorHasIndicesAndBranch(
  18797    LIteratorHasIndicesAndBranch* lir) {
  18798  Register iterator = ToRegister(lir->iterator());
  18799  Register object = ToRegister(lir->object());
  18800  Register temp = ToRegister(lir->temp0());
  18801  Register temp2 = ToRegister(lir->temp1());
  18802  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  18803  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
  18804 
  18805  emitIteratorHasIndicesAndBranch(iterator, object, temp, temp2, ifFalse);
  18806 
  18807  if (!isNextBlock(lir->ifTrue()->lir())) {
  18808    masm.jump(ifTrue);
  18809  }
  18810 }
  18811 
  18812 void CodeGenerator::visitIteratorsMatchAndHaveIndicesAndBranch(
  18813    LIteratorsMatchAndHaveIndicesAndBranch* lir) {
  18814  Register iterator = ToRegister(lir->iterator());
  18815  Register otherIterator = ToRegister(lir->otherIterator());
  18816  Register object = ToRegister(lir->object());
  18817  Register temp = ToRegister(lir->temp0());
  18818  Register temp2 = ToRegister(lir->temp1());
  18819  Label* ifTrue = getJumpLabelForBranch(lir->ifTrue());
  18820  Label* ifFalse = getJumpLabelForBranch(lir->ifFalse());
  18821 
  18822  // Check that the iterators match, and then we can use either iterator
  18823  // as a basis as if this were visitIteratorHasIndicesAndBranch
  18824  masm.branchPtr(Assembler::NotEqual, iterator, otherIterator, ifFalse);
  18825 
  18826  emitIteratorHasIndicesAndBranch(iterator, object, temp, temp2, ifFalse);
  18827 
  18828  if (!isNextBlock(lir->ifTrue()->lir())) {
  18829    masm.jump(ifTrue);
  18830  }
  18831 }
  18832 
// Shared tail for loading a property value addressed by a PropertyIndex
// (index, kind) pair: |indexScratch| holds the index, |kindScratch| holds the
// PropertyIndex::Kind. Dispatches on the kind (dynamic slot, fixed slot, or
// dense element) and loads the Value into |result|. |kindScratch| is reused
// as a scratch register for the slots/elements pointer.
void CodeGenerator::visitLoadSlotByIteratorIndexCommon(Register object,
                                                       Register indexScratch,
                                                       Register kindScratch,
                                                       ValueOperand result) {
  Label notDynamicSlot, notFixedSlot, done;
  masm.branch32(Assembler::NotEqual, kindScratch,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  // Dynamic slot: index into the out-of-line slots array.
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), kindScratch);
  masm.loadValue(BaseValueIndex(kindScratch, indexScratch), result);
  masm.jump(&done);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, kindScratch,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.loadValue(BaseValueIndex(object, indexScratch, sizeof(NativeObject)),
                 result);
  masm.jump(&done);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  // The only remaining kind must be Element.
  Label kindOkay;
  masm.branch32(Assembler::Equal, kindScratch,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), kindScratch);
  Label indexOkay;
  Address initLength(kindScratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, indexScratch, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  masm.loadValue(BaseObjectElementIndex(kindScratch, indexScratch), result);
  masm.bind(&done);
}
  18873 
  18874 void CodeGenerator::visitLoadSlotByIteratorIndex(
  18875    LLoadSlotByIteratorIndex* lir) {
  18876  Register object = ToRegister(lir->object());
  18877  Register iterator = ToRegister(lir->iterator());
  18878  Register indexScratch = ToRegister(lir->temp0());
  18879  Register kindScratch = ToRegister(lir->temp1());
  18880  ValueOperand result = ToOutValue(lir);
  18881 
  18882  masm.extractCurrentIndexAndKindFromIterator(iterator, indexScratch,
  18883                                              kindScratch);
  18884 
  18885  visitLoadSlotByIteratorIndexCommon(object, indexScratch, kindScratch, result);
  18886 }
  18887 
  18888 void CodeGenerator::visitLoadSlotByIteratorIndexIndexed(
  18889    LLoadSlotByIteratorIndexIndexed* lir) {
  18890  Register object = ToRegister(lir->object());
  18891  Register iterator = ToRegister(lir->iterator());
  18892  Register index = ToRegister(lir->index());
  18893  Register indexScratch = ToRegister(lir->temp0());
  18894  Register kindScratch = ToRegister(lir->temp1());
  18895  ValueOperand result = ToOutValue(lir);
  18896 
  18897  masm.extractIndexAndKindFromIteratorByIterIndex(iterator, index, kindScratch,
  18898                                                  indexScratch);
  18899 
  18900  visitLoadSlotByIteratorIndexCommon(object, indexScratch, kindScratch, result);
  18901 }
  18902 
// Shared tail for storing |value| into a property addressed by a
// PropertyIndex (index, kind) pair: |indexScratch| holds the index,
// |kindScratch| holds the PropertyIndex::Kind. Computes the effective slot or
// element address into |indexScratch|, emits a pre-barrier, performs the
// store, then emits a post-write barrier when |object| is tenured and
// |value| is a nursery cell. Both scratch registers are clobbered.
void CodeGenerator::visitStoreSlotByIteratorIndexCommon(Register object,
                                                        Register indexScratch,
                                                        Register kindScratch,
                                                        ValueOperand value) {
  Label notDynamicSlot, notFixedSlot, done, doStore;
  masm.branch32(Assembler::NotEqual, kindScratch,
                Imm32(uint32_t(PropertyIndex::Kind::DynamicSlot)),
                &notDynamicSlot);
  // Dynamic slot: compute the address inside the out-of-line slots array.
  masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), kindScratch);
  masm.computeEffectiveAddress(BaseValueIndex(kindScratch, indexScratch),
                               indexScratch);
  masm.jump(&doStore);

  masm.bind(&notDynamicSlot);
  masm.branch32(Assembler::NotEqual, kindScratch,
                Imm32(uint32_t(PropertyIndex::Kind::FixedSlot)), &notFixedSlot);
  // Fixed slot
  masm.computeEffectiveAddress(
      BaseValueIndex(object, indexScratch, sizeof(NativeObject)), indexScratch);
  masm.jump(&doStore);
  masm.bind(&notFixedSlot);

#ifdef DEBUG
  // The only remaining kind must be Element.
  Label kindOkay;
  masm.branch32(Assembler::Equal, kindScratch,
                Imm32(uint32_t(PropertyIndex::Kind::Element)), &kindOkay);
  masm.assumeUnreachable("Invalid PropertyIndex::Kind");
  masm.bind(&kindOkay);
#endif

  // Dense element
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), kindScratch);
  Label indexOkay;
  Address initLength(kindScratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::Above, initLength, indexScratch, &indexOkay);
  masm.assumeUnreachable("Dense element out of bounds");
  masm.bind(&indexOkay);

  BaseObjectElementIndex elementAddress(kindScratch, indexScratch);
  masm.computeEffectiveAddress(elementAddress, indexScratch);

  // All three paths join here with the target address in |indexScratch|.
  masm.bind(&doStore);
  Address storeAddress(indexScratch, 0);
  emitPreBarrier(storeAddress);
  masm.storeValue(value, storeAddress);

  // Post-write barrier: skip it if the object is itself in the nursery, or
  // if the stored value is not a nursery cell.
  masm.branchPtrInNurseryChunk(Assembler::Equal, object, kindScratch, &done);
  masm.branchValueIsNurseryCell(Assembler::NotEqual, value, kindScratch, &done);

  saveVolatile(kindScratch);
  emitPostWriteBarrier(object);
  restoreVolatile(kindScratch);

  masm.bind(&done);
}
  18958 
  18959 void CodeGenerator::visitStoreSlotByIteratorIndex(
  18960    LStoreSlotByIteratorIndex* lir) {
  18961  Register object = ToRegister(lir->object());
  18962  Register iterator = ToRegister(lir->iterator());
  18963  ValueOperand value = ToValue(lir->value());
  18964  Register indexScratch = ToRegister(lir->temp0());
  18965  Register kindScratch = ToRegister(lir->temp1());
  18966 
  18967  masm.extractCurrentIndexAndKindFromIterator(iterator, indexScratch,
  18968                                              kindScratch);
  18969 
  18970  visitStoreSlotByIteratorIndexCommon(object, indexScratch, kindScratch, value);
  18971 }
  18972 
  18973 void CodeGenerator::visitStoreSlotByIteratorIndexIndexed(
  18974    LStoreSlotByIteratorIndexIndexed* lir) {
  18975  Register object = ToRegister(lir->object());
  18976  Register iterator = ToRegister(lir->iterator());
  18977  Register index = ToRegister(lir->index());
  18978  ValueOperand value = ToValue(lir->value());
  18979  Register indexScratch = ToRegister(lir->temp0());
  18980  Register kindScratch = ToRegister(lir->temp1());
  18981 
  18982  masm.extractIndexAndKindFromIteratorByIterIndex(iterator, index, kindScratch,
  18983                                                  indexScratch);
  18984 
  18985  visitStoreSlotByIteratorIndexCommon(object, indexScratch, kindScratch, value);
  18986 }
  18987 
  18988 void CodeGenerator::visitSetPropertyCache(LSetPropertyCache* ins) {
  18989  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  18990  Register objReg = ToRegister(ins->object());
  18991  Register temp = ToRegister(ins->temp0());
  18992 
  18993  ConstantOrRegister id = toConstantOrRegister(ins, LSetPropertyCache::IdIndex,
  18994                                               ins->mir()->idval()->type());
  18995  ConstantOrRegister value = toConstantOrRegister(
  18996      ins, LSetPropertyCache::ValueIndex, ins->mir()->value()->type());
  18997 
  18998  addSetPropertyCache(ins, liveRegs, objReg, temp, id, value,
  18999                      ins->mir()->strict());
  19000 }
  19001 
  19002 void CodeGenerator::visitThrow(LThrow* lir) {
  19003  pushArg(ToValue(lir->value()));
  19004 
  19005  using Fn = bool (*)(JSContext*, HandleValue);
  19006  callVM<Fn, js::ThrowOperation>(lir);
  19007 }
  19008 
  19009 void CodeGenerator::visitThrowWithStack(LThrowWithStack* lir) {
  19010  pushArg(ToValue(lir->stack()));
  19011  pushArg(ToValue(lir->value()));
  19012 
  19013  using Fn = bool (*)(JSContext*, HandleValue, HandleValue);
  19014  callVM<Fn, js::ThrowWithStackOperation>(lir);
  19015 }
  19016 
  19017 void CodeGenerator::emitTypeOfJSType(JSValueType type, Register output) {
  19018  switch (type) {
  19019    case JSVAL_TYPE_OBJECT:
  19020      masm.move32(Imm32(JSTYPE_OBJECT), output);
  19021      break;
  19022    case JSVAL_TYPE_DOUBLE:
  19023    case JSVAL_TYPE_INT32:
  19024      masm.move32(Imm32(JSTYPE_NUMBER), output);
  19025      break;
  19026    case JSVAL_TYPE_BOOLEAN:
  19027      masm.move32(Imm32(JSTYPE_BOOLEAN), output);
  19028      break;
  19029    case JSVAL_TYPE_UNDEFINED:
  19030      masm.move32(Imm32(JSTYPE_UNDEFINED), output);
  19031      break;
  19032    case JSVAL_TYPE_NULL:
  19033      masm.move32(Imm32(JSTYPE_OBJECT), output);
  19034      break;
  19035    case JSVAL_TYPE_STRING:
  19036      masm.move32(Imm32(JSTYPE_STRING), output);
  19037      break;
  19038    case JSVAL_TYPE_SYMBOL:
  19039      masm.move32(Imm32(JSTYPE_SYMBOL), output);
  19040      break;
  19041    case JSVAL_TYPE_BIGINT:
  19042      masm.move32(Imm32(JSTYPE_BIGINT), output);
  19043      break;
  19044    default:
  19045      MOZ_CRASH("Unsupported JSValueType");
  19046  }
  19047 }
  19048 
// Emit one typeof tag check: if |tag| matches |type|, load the JSType result
// into |output| and jump to |done|; otherwise fall through. Objects never
// resolve inline — they branch to |oolObject| because the result depends on
// whether the object is callable or emulates undefined.
void CodeGenerator::emitTypeOfCheck(JSValueType type, Register tag,
                                    Register output, Label* done,
                                    Label* oolObject) {
  Label notMatch;
  switch (type) {
    case JSVAL_TYPE_OBJECT:
      // The input may be a callable object (result is "function") or
      // may emulate undefined (result is "undefined"). Use an OOL path.
      masm.branchTestObject(Assembler::Equal, tag, oolObject);
      return;
    case JSVAL_TYPE_DOUBLE:
    case JSVAL_TYPE_INT32:
      // Both numeric tags produce "number": test them together.
      masm.branchTestNumber(Assembler::NotEqual, tag, &notMatch);
      break;
    default:
      masm.branchTestType(Assembler::NotEqual, tag, type, &notMatch);
      break;
  }

  emitTypeOfJSType(type, output);
  masm.jump(done);
  masm.bind(&notMatch);
}
  19072 
// Compute typeof for a boxed Value. Emits a chain of tag checks, ordered by
// previously observed type frequency, each loading the JSType result into
// |output|. Object inputs go to an out-of-line path which classifies the
// object (callable / emulates-undefined / plain object).
void CodeGenerator::visitTypeOfV(LTypeOfV* lir) {
  ValueOperand value = ToValue(lir->input());
  Register output = ToRegister(lir->output());
  Register tag = masm.extractTag(value, output);

  Label done;

  // OOL path for object inputs: unbox the object and classify it.
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    ValueOperand input = ToValue(lir->input());
    Register temp = ToTempUnboxRegister(lir->temp0());
    Register output = ToRegister(lir->output());

    Register obj = masm.extractObject(input, temp);
    emitTypeOfObject(obj, output, ool.rejoin());
    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  // Fallback check order for types with no profiling data.
  const std::initializer_list<JSValueType> defaultOrder = {
      JSVAL_TYPE_OBJECT, JSVAL_TYPE_DOUBLE,  JSVAL_TYPE_UNDEFINED,
      JSVAL_TYPE_NULL,   JSVAL_TYPE_BOOLEAN, JSVAL_TYPE_STRING,
      JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};

  mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);

  // Generate checks for previously observed types first.
  // The TypeDataList is sorted by descending frequency.
  for (auto& observed : lir->mir()->observedTypes()) {
    JSValueType type = observed.type();

    // Unify number types.
    if (type == JSVAL_TYPE_INT32) {
      type = JSVAL_TYPE_DOUBLE;
    }

    remaining -= type;

    emitTypeOfCheck(type, tag, output, &done, ool->entry());
  }

  // Generate checks for remaining types.
  for (auto type : defaultOrder) {
    if (!remaining.contains(type)) {
      continue;
    }
    remaining -= type;

    if (remaining.isEmpty() && type != JSVAL_TYPE_OBJECT) {
      // We can skip the check for the last remaining type, unless the type is
      // JSVAL_TYPE_OBJECT, which may have to go through the OOL path.
#ifdef DEBUG
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
      masm.assumeUnreachable("Unexpected Value type in visitTypeOfV");
#else
      emitTypeOfJSType(type, output);
#endif
    } else {
      emitTypeOfCheck(type, tag, output, &done, ool->entry());
    }
  }
  MOZ_ASSERT(remaining.isEmpty());

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
  19138 
// Compute typeof's JSType result for the object |obj| into |output| and jump
// to |done|. The inline path classifies the object into callable
// ("function"), emulates-undefined ("undefined"), or plain "object"; cases
// masm.typeOfObject cannot decide inline fall into a slow ABI call to
// js::TypeOfObject.
void CodeGenerator::emitTypeOfObject(Register obj, Register output,
                                     Label* done) {
  Label slowCheck, isObject, isCallable, isUndefined;
  masm.typeOfObject(obj, output, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.move32(Imm32(JSTYPE_FUNCTION), output);
  masm.jump(done);

  masm.bind(&isUndefined);
  masm.move32(Imm32(JSTYPE_UNDEFINED), output);
  masm.jump(done);

  masm.bind(&isObject);
  masm.move32(Imm32(JSTYPE_OBJECT), output);
  masm.jump(done);

  masm.bind(&slowCheck);

  // Slow path: call into C++. Volatile registers (except |output|) are
  // preserved around the call.
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);
}
  19167 
  19168 void CodeGenerator::visitTypeOfO(LTypeOfO* lir) {
  19169  Register obj = ToRegister(lir->object());
  19170  Register output = ToRegister(lir->output());
  19171 
  19172  Label done;
  19173  emitTypeOfObject(obj, output, &done);
  19174  masm.bind(&done);
  19175 }
  19176 
// Map a JSType value in |input| to the corresponding type-name atom pointer
// in |output|.
void CodeGenerator::visitTypeOfName(LTypeOfName* lir) {
  Register input = ToRegister(lir->input());
  Register output = ToRegister(lir->output());

#ifdef DEBUG
  // |input| must be a valid JSType.
  Label ok;
  masm.branch32(Assembler::Below, input, Imm32(JSTYPE_LIMIT), &ok);
  masm.assumeUnreachable("bad JSType");
  masm.bind(&ok);
#endif

  // This relies on the runtime's type-name atoms being laid out in JSType
  // order starting at |undefined| (JSTYPE_UNDEFINED == 0), so we can index
  // off the address of the first entry.
  static_assert(JSTYPE_UNDEFINED == 0);

  masm.movePtr(ImmPtr(&gen->runtime->names().undefined), output);
  masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}
  19193 
// Out-of-line path for "typeof x ==/!= <name>" comparisons on objects: call
// js::TypeOfObject to get the precise JSType, then compare it against the
// expected type with the comparison op from the MIR node, setting |output|
// to the boolean result.
void CodeGenerator::emitTypeOfIsObjectOOL(MTypeOfIs* mir, Register obj,
                                          Register output) {
  saveVolatile(output);
  using Fn = JSType (*)(JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::TypeOfObject>();
  masm.storeCallInt32Result(output);
  restoreVolatile(output);

  // JSType values are small non-negative integers, so compare unsigned.
  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
  masm.cmp32Set(cond, output, Imm32(mir->jstype()), output);
}
  19207 
// Emit "typeof obj ==/!= <name>" for an object input. The three possible
// object classifications (plain object, function, emulates-undefined) are
// routed to |success| when they match the expected JSType and to |fail|
// otherwise; objects masm.typeOfObject cannot classify inline branch to
// |slowCheck|. The bound tails load the boolean result into |output|,
// inverted as needed for Ne/StrictNe comparisons.
void CodeGenerator::emitTypeOfIsObject(MTypeOfIs* mir, Register obj,
                                       Register output, Label* success,
                                       Label* fail, Label* slowCheck) {
  // By default every classification fails; the expected one is redirected
  // to |success| below.
  Label* isObject = fail;
  Label* isFunction = fail;
  Label* isUndefined = fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED:
      isUndefined = success;
      break;

    case JSTYPE_OBJECT:
      isObject = success;
      break;

    case JSTYPE_FUNCTION:
      isFunction = success;
      break;

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
    case JSTYPE_LIMIT:
      MOZ_CRASH("Primitive type");
  }

  masm.typeOfObject(obj, output, slowCheck, isObject, isFunction, isUndefined);

  auto op = mir->jsop();

  // Materialize the boolean result for the two outcomes.
  Label done;
  masm.bind(fail);
  masm.move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
  masm.jump(&done);
  masm.bind(success);
  masm.move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
  masm.bind(&done);
}
  19249 
// typeof-is comparison against "undefined"/"object"/"function" where the
// operand is a boxed Value. The value's tag is tested first: non-object
// primitives resolve immediately, objects fall through to the object check.
void CodeGenerator::visitTypeOfIsNonPrimitiveV(LTypeOfIsNonPrimitiveV* lir) {
  ValueOperand input = ToValue(lir->input());
  Register output = ToRegister(lir->output());
  Register temp = ToTempUnboxRegister(lir->temp0());

  auto* mir = lir->mir();

  // Out-of-line fallback: unbox the object and classify it via a VM call.
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    ValueOperand input = ToValue(lir->input());
    Register output = ToRegister(lir->output());
    Register temp = ToTempUnboxRegister(lir->temp0());

    Register obj = masm.extractObject(input, temp);

    emitTypeOfIsObjectOOL(lir->mir(), obj, output);

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, mir);

  Label success, fail;

  switch (mir->jstype()) {
    case JSTYPE_UNDEFINED: {
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      // The undefined value succeeds outright; any other non-object value
      // fails; objects need the full typeof check below.
      masm.branchTestUndefined(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_OBJECT: {
      ScratchTagScope tag(masm, input);
      masm.splitTagForTest(input, tag);

      // typeof null is "object", so null succeeds without the object check.
      masm.branchTestNull(Assembler::Equal, tag, &success);
      masm.branchTestObject(Assembler::NotEqual, tag, &fail);
      break;
    }

    case JSTYPE_FUNCTION: {
      // Only object values can be functions; everything else fails.
      masm.branchTestObject(Assembler::NotEqual, input, &fail);
      break;
    }

    case JSTYPE_STRING:
    case JSTYPE_NUMBER:
    case JSTYPE_BOOLEAN:
    case JSTYPE_SYMBOL:
    case JSTYPE_BIGINT:
    case JSTYPE_LIMIT:
      // Primitive expected types are compiled by visitTypeOfIsPrimitive.
      MOZ_CRASH("Primitive type");
  }

  // Here the value is known to hold an object.
  Register obj = masm.extractObject(input, temp);

  emitTypeOfIsObject(mir, obj, output, &success, &fail, ool->entry());

  masm.bind(ool->rejoin());
}
  19311 
  19312 void CodeGenerator::visitTypeOfIsNonPrimitiveO(LTypeOfIsNonPrimitiveO* lir) {
  19313  Register input = ToRegister(lir->input());
  19314  Register output = ToRegister(lir->output());
  19315 
  19316  auto* mir = lir->mir();
  19317 
  19318  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  19319    Register input = ToRegister(lir->input());
  19320    Register output = ToRegister(lir->output());
  19321 
  19322    emitTypeOfIsObjectOOL(lir->mir(), input, output);
  19323 
  19324    masm.jump(ool.rejoin());
  19325  });
  19326  addOutOfLineCode(ool, mir);
  19327 
  19328  Label success, fail;
  19329  emitTypeOfIsObject(mir, input, output, &success, &fail, ool->entry());
  19330 
  19331  masm.bind(ool->rejoin());
  19332 }
  19333 
  19334 void CodeGenerator::visitTypeOfIsPrimitive(LTypeOfIsPrimitive* lir) {
  19335  ValueOperand input = ToValue(lir->input());
  19336  Register output = ToRegister(lir->output());
  19337 
  19338  auto* mir = lir->mir();
  19339  auto cond = JSOpToCondition(mir->jsop(), /* isSigned = */ false);
  19340 
  19341  switch (mir->jstype()) {
  19342    case JSTYPE_STRING:
  19343      masm.testStringSet(cond, input, output);
  19344      break;
  19345    case JSTYPE_NUMBER:
  19346      masm.testNumberSet(cond, input, output);
  19347      break;
  19348    case JSTYPE_BOOLEAN:
  19349      masm.testBooleanSet(cond, input, output);
  19350      break;
  19351    case JSTYPE_SYMBOL:
  19352      masm.testSymbolSet(cond, input, output);
  19353      break;
  19354    case JSTYPE_BIGINT:
  19355      masm.testBigIntSet(cond, input, output);
  19356      break;
  19357 
  19358    case JSTYPE_UNDEFINED:
  19359    case JSTYPE_OBJECT:
  19360    case JSTYPE_FUNCTION:
  19361    case JSTYPE_LIMIT:
  19362      MOZ_CRASH("Non-primitive type");
  19363  }
  19364 }
  19365 
  19366 void CodeGenerator::visitToAsyncIter(LToAsyncIter* lir) {
  19367  pushArg(ToValue(lir->nextMethod()));
  19368  pushArg(ToRegister(lir->iterator()));
  19369 
  19370  using Fn = JSObject* (*)(JSContext*, HandleObject, HandleValue);
  19371  callVM<Fn, js::CreateAsyncFromSyncIterator>(lir);
  19372 }
  19373 
  19374 void CodeGenerator::visitToPropertyKeyCache(LToPropertyKeyCache* lir) {
  19375  LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
  19376  ValueOperand input = ToValue(lir->input());
  19377  ValueOperand output = ToOutValue(lir);
  19378 
  19379  IonToPropertyKeyIC ic(liveRegs, input, output);
  19380  addIC(lir, allocateIC(ic));
  19381 }
  19382 
// Load a boxed Value from an object's dense-elements storage.
void CodeGenerator::visitLoadElementV(LLoadElementV* load) {
  Register elements = ToRegister(load->elements());
  const ValueOperand out = ToOutValue(load);

  // Constant index -> absolute Address, register index -> scaled BaseIndex.
  auto source = ToAddressOrBaseObjectElementIndex(elements, load->index());

  source.match([&](auto const& source) { masm.loadValue(source, out); });

  if (load->mir()->needsHoleCheck()) {
    // A magic value marks a hole; bail out to handle it in the interpreter.
    Label testMagic;
    masm.branchTestMagic(Assembler::Equal, out, &testMagic);
    bailoutFrom(&testMagic, load->snapshot());
  } else {
#ifdef DEBUG
    // If MIR claims no hole check is needed, a hole here is a compiler bug.
    Label ok;
    masm.branchTestMagic(Assembler::NotEqual, out, &ok);
    masm.assumeUnreachable("LoadElementV had incorrect needsHoleCheck");
    masm.bind(&ok);
#endif
  }
}
  19404 
// Load a dense element, producing |undefined| for out-of-bounds reads and
// holes instead of bailing out.
void CodeGenerator::visitLoadElementHole(LLoadElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register initLength = ToRegister(lir->initLength());
  const ValueOperand out = ToOutValue(lir);

  const MLoadElementHole* mir = lir->mir();

  // If the index is out of bounds, load |undefined|. Otherwise, load the
  // value. The Spectre-safe bounds check clamps the index on misspeculation.
  Label outOfBounds, done;
  masm.spectreBoundsCheck32(index, initLength, out.scratchReg(), &outOfBounds);

  masm.loadValue(BaseObjectElementIndex(elements, index), out);

  // If the value wasn't a hole, we're done. Otherwise, we'll load undefined.
  masm.branchTestMagic(Assembler::NotEqual, out, &done);

  if (mir->needsNegativeIntCheck()) {
    Label loadUndefined;
    masm.jump(&loadUndefined);

    masm.bind(&outOfBounds);

    // Negative indices may be real (named) properties; bail out so the
    // generic path can look them up.
    bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

    masm.bind(&loadUndefined);
  } else {
    masm.bind(&outOfBounds);
  }
  // Both the hole case and the in-range out-of-bounds case yield undefined.
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
  19439 
  19440 CodeGenerator::AddressOrBaseIndex CodeGenerator::ToAddressOrBaseIndex(
  19441    Register elements, const LAllocation* index, Scalar::Type type) {
  19442  if (index->isConstant()) {
  19443    return AddressOrBaseIndex(ToAddress(elements, index, type));
  19444  }
  19445  return AddressOrBaseIndex(
  19446      BaseIndex(elements, ToRegister(index), ScaleFromScalarType(type)));
  19447 }
  19448 
// Load a scalar element from typed-array storage into the output register,
// converting to the output's representation as needed.
void CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir) {
  Register elements = ToRegister(lir->elements());
  Register temp0 = ToTempRegisterOrInvalid(lir->temp0());
  Register temp1 = ToTempRegisterOrInvalid(lir->temp1());
  AnyRegister out = ToAnyRegister(lir->output());

  Scalar::Type storageType = lir->mir()->storageType();

  // Some storage types need a runtime call to load (see
  // MacroAssembler::LoadRequiresCall), which clobbers volatile registers.
  LiveRegisterSet volatileRegs;
  if (MacroAssembler::LoadRequiresCall(storageType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  auto source = ToAddressOrBaseIndex(elements, lir->index(), storageType);

  Label fail;
  source.match([&](const auto& source) {
    masm.loadFromTypedArray(storageType, source, out, temp0, temp1, &fail,
                            volatileRegs);
  });

  // |fail| is only bound for loads whose value may not be representable in
  // the output (e.g. a Uint32 load into an int32 output); bail out then.
  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }
}
  19474 
  19475 void CodeGenerator::visitLoadUnboxedInt64(LLoadUnboxedInt64* lir) {
  19476  Register elements = ToRegister(lir->elements());
  19477  Register64 out = ToOutRegister64(lir);
  19478 
  19479  Scalar::Type storageType = lir->mir()->storageType();
  19480 
  19481  auto source = ToAddressOrBaseIndex(elements, lir->index(), storageType);
  19482 
  19483  source.match([&](const auto& source) { masm.load64(source, out); });
  19484 }
  19485 
// Load an element from a DataView. DataView accesses are unaligned and may
// request either byte order, so the general path loads into a GPR, swaps
// bytes if the requested endianness differs from the host's, and then moves
// the bits into the output register.
void CodeGenerator::visitLoadDataViewElement(LLoadDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp1 = ToTempRegisterOrInvalid(lir->temp0());
  Register temp2 = ToTempRegisterOrInvalid(lir->temp1());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp2());
  AnyRegister out = ToAnyRegister(lir->output());

  Scalar::Type storageType = lir->mir()->storageType();

  LiveRegisterSet volatileRegs;
  if (MacroAssembler::LoadRequiresCall(storageType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  // DataView indices are byte offsets, hence Scalar::Uint8 scaling.
  auto source = ToAddressOrBaseIndex(elements, lir->index(), Scalar::Uint8);

  // No swap is needed when the requested endianness is a constant matching
  // the host byte order.
  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly load if no byte swap is needed and the platform supports unaligned
  // accesses for the access.  (Such support is assumed for integer types.)
  if (noSwap && (!Scalar::isFloatingType(storageType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    Label fail;
    source.match([&](const auto& source) {
      masm.loadFromTypedArray(storageType, source, out, temp1, temp2, &fail,
                              volatileRegs);
    });

    if (fail.used()) {
      bailoutFrom(&fail, lir->snapshot());
    }
    return;
  }

  // Load the value into a gpr register.
  source.match([&](const auto& source) {
    switch (storageType) {
      case Scalar::Int16:
        masm.load16UnalignedSignExtend(source, out.gpr());
        break;
      case Scalar::Uint16:
        masm.load16UnalignedZeroExtend(source, out.gpr());
        break;
      case Scalar::Int32:
        masm.load32Unaligned(source, out.gpr());
        break;
      case Scalar::Uint32:
        // A Uint32 result may be produced as a double; stage in temp1 then.
        masm.load32Unaligned(source, out.isFloat() ? temp1 : out.gpr());
        break;
      case Scalar::Float16:
        masm.load16UnalignedZeroExtend(source, temp1);
        break;
      case Scalar::Float32:
        masm.load32Unaligned(source, temp1);
        break;
      case Scalar::Float64:
        masm.load64Unaligned(source, temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
      default:
        // 1-byte types never need swapping and BigInt types are handled by
        // visitLoadDataViewElement64.
        MOZ_CRASH("Invalid typed array type");
    }
  });

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Dynamic endianness: skip the swap when the requested order already
      // matches the host order.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (storageType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(out.gpr());
        break;
      case Scalar::Uint16:
        masm.byteSwap16ZeroExtend(out.gpr());
        break;
      case Scalar::Int32:
        masm.byteSwap32(out.gpr());
        break;
      case Scalar::Uint32:
        masm.byteSwap32(out.isFloat() ? temp1 : out.gpr());
        break;
      case Scalar::Float16:
        masm.byteSwap16ZeroExtend(temp1);
        break;
      case Scalar::Float32:
        masm.byteSwap32(temp1);
        break;
      case Scalar::Float64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Move the value into the output register.
  switch (storageType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      // Already loaded (and possibly swapped) directly into the output GPR.
      break;
    case Scalar::Uint32:
      if (out.isFloat()) {
        masm.convertUInt32ToDouble(temp1, out.fpu());
      } else {
        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadDataViewElement to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        bailoutTest32(Assembler::Signed, out.gpr(), out.gpr(), lir->snapshot());
      }
      break;
    case Scalar::Float16:
      masm.moveGPRToFloat16(temp1, out.fpu(), temp2, volatileRegs);
      // Canonicalize NaNs so they can't leak bit patterns into JS values.
      masm.canonicalizeFloat(out.fpu());
      break;
    case Scalar::Float32:
      masm.moveGPRToFloat32(temp1, out.fpu());
      masm.canonicalizeFloat(out.fpu());
      break;
    case Scalar::Float64:
      masm.moveGPR64ToDouble(temp64, out.fpu());
      masm.canonicalizeDouble(out.fpu());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
  19638 
// Load a 64-bit (BigInt64/BigUint64) DataView element into a 64-bit register,
// byte-swapping when the requested endianness differs from the host's.
void CodeGenerator::visitLoadDataViewElement64(LLoadDataViewElement64* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* littleEndian = lir->littleEndian();
  Register64 out = ToOutRegister64(lir);

  MOZ_ASSERT(Scalar::isBigIntType(lir->mir()->storageType()));

  // DataView indices are byte offsets, hence Scalar::Uint8 scaling.
  auto source = ToAddressOrBaseIndex(elements, lir->index(), Scalar::Uint8);

  // No swap needed when the endianness is a constant matching the host order.
  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Load the value into a register.
  source.match([&](const auto& source) { masm.load64Unaligned(source, out); });

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Dynamic endianness: skip the swap if the requested order matches.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    masm.byteSwap64(out);

    if (skip.used()) {
      masm.bind(&skip);
    }
  }
}
  19670 
// Load a typed-array element as a boxed Value, yielding |undefined| for
// out-of-bounds indices instead of bailing out.
void CodeGenerator::visitLoadTypedArrayElementHole(
    LLoadTypedArrayElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  Register temp = ToTempRegisterOrInvalid(lir->temp0());
  const ValueOperand out = ToOutValue(lir);

  Register scratch = out.scratchReg();

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, length, scratch, &outOfBounds);

  Scalar::Type arrayType = lir->mir()->arrayType();

  // Some element types need a runtime call to load (see
  // MacroAssembler::LoadRequiresCall), clobbering volatile registers.
  LiveRegisterSet volatileRegs;
  if (MacroAssembler::LoadRequiresCall(arrayType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  Label fail;
  BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
  // Uint32 values above INT32_MAX either become doubles (ForceDouble) or
  // trigger the |fail| bailout, depending on what MIR decided.
  MacroAssembler::Uint32Mode uint32Mode =
      lir->mir()->forceDouble() ? MacroAssembler::Uint32Mode::ForceDouble
                                : MacroAssembler::Uint32Mode::FailOnDouble;
  masm.loadFromTypedArray(arrayType, source, out, uint32Mode, temp, &fail,
                          volatileRegs);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  if (fail.used()) {
    bailoutFrom(&fail, lir->snapshot());
  }

  masm.bind(&done);
}
  19710 
// Load a BigInt64/BigUint64 typed-array element as a boxed Value, allocating
// a BigInt for in-bounds reads and producing |undefined| when out of bounds.
void CodeGenerator::visitLoadTypedArrayElementHoleBigInt(
    LLoadTypedArrayElementHoleBigInt* lir) {
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register length = ToRegister(lir->length());
  const ValueOperand out = ToOutValue(lir);

  Register temp = ToRegister(lir->temp0());

  // On x86 there are not enough registers. In that case reuse the output
  // registers as temporaries.
#ifdef JS_CODEGEN_X86
  MOZ_ASSERT(lir->temp1().isBogusTemp());
  Register64 temp64 = out.toRegister64();
#else
  Register64 temp64 = ToRegister64(lir->temp1());
#endif

  // Load undefined if index >= length.
  Label outOfBounds, done;
  masm.spectreBoundsCheckPtr(index, length, temp, &outOfBounds);

  Scalar::Type arrayType = lir->mir()->arrayType();
  BaseIndex source(elements, index, ScaleFromScalarType(arrayType));
  masm.load64(source, temp64);

  // Pick registers for the BigInt allocation; on x86 the output registers
  // are already in use as temp64, so |temp| holds the BigInt instead.
#ifdef JS_CODEGEN_X86
  Register bigInt = temp;
  Register maybeTemp = InvalidReg;
#else
  Register bigInt = out.scratchReg();
  Register maybeTemp = temp;
#endif
  emitCreateBigInt(lir, arrayType, temp64, bigInt, maybeTemp);

  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, out);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  masm.moveValue(UndefinedValue(), out);

  masm.bind(&done);
}
  19754 
  19755 template <typename T>
  19756 static inline void StoreToTypedArray(MacroAssembler& masm,
  19757                                     Scalar::Type writeType,
  19758                                     const LAllocation* value, const T& dest,
  19759                                     Register temp,
  19760                                     LiveRegisterSet volatileRegs) {
  19761  if (Scalar::isFloatingType(writeType)) {
  19762    masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest, temp,
  19763                                volatileRegs);
  19764  } else {
  19765    if (value->isConstant()) {
  19766      masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
  19767    } else {
  19768      masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
  19769    }
  19770  }
  19771 }
  19772 
  19773 void CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir) {
  19774  Register elements = ToRegister(lir->elements());
  19775  Register temp = ToTempRegisterOrInvalid(lir->temp0());
  19776  const LAllocation* value = lir->value();
  19777 
  19778  Scalar::Type writeType = lir->mir()->writeType();
  19779 
  19780  LiveRegisterSet volatileRegs;
  19781  if (MacroAssembler::StoreRequiresCall(writeType)) {
  19782    volatileRegs = liveVolatileRegs(lir);
  19783  }
  19784 
  19785  auto dest = ToAddressOrBaseIndex(elements, lir->index(), writeType);
  19786 
  19787  dest.match([&](const auto& dest) {
  19788    StoreToTypedArray(masm, writeType, value, dest, temp, volatileRegs);
  19789  });
  19790 }
  19791 
  19792 template <typename T>
  19793 static inline void StoreToTypedBigIntArray(MacroAssembler& masm,
  19794                                           const LInt64Allocation& value,
  19795                                           const T& dest) {
  19796  if (IsConstant(value)) {
  19797    masm.storeToTypedBigIntArray(Imm64(ToInt64(value)), dest);
  19798  } else {
  19799    masm.storeToTypedBigIntArray(ToRegister64(value), dest);
  19800  }
  19801 }
  19802 
  19803 void CodeGenerator::visitStoreUnboxedInt64(LStoreUnboxedInt64* lir) {
  19804  Register elements = ToRegister(lir->elements());
  19805  LInt64Allocation value = lir->value();
  19806 
  19807  Scalar::Type writeType = lir->mir()->writeType();
  19808  MOZ_ASSERT(Scalar::isBigIntType(writeType));
  19809 
  19810  auto dest = ToAddressOrBaseIndex(elements, lir->index(), writeType);
  19811 
  19812  dest.match(
  19813      [&](const auto& dest) { StoreToTypedBigIntArray(masm, value, dest); });
  19814 }
  19815 
// Store an element into a DataView. DataView accesses are unaligned and may
// request either byte order, so the general path moves the value into a GPR,
// swaps bytes if the requested endianness differs from the host's, and then
// performs an unaligned store.
void CodeGenerator::visitStoreDataViewElement(LStoreDataViewElement* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();
  const LAllocation* littleEndian = lir->littleEndian();
  Register temp = ToTempRegisterOrInvalid(lir->temp0());
  Register64 temp64 = ToTempRegister64OrInvalid(lir->temp1());

  Scalar::Type writeType = lir->mir()->writeType();

  LiveRegisterSet volatileRegs;
  if (MacroAssembler::StoreRequiresCall(writeType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  // DataView indices are byte offsets, hence Scalar::Uint8 scaling.
  auto dest = ToAddressOrBaseIndex(elements, lir->index(), Scalar::Uint8);

  // No swap needed when the endianness is a constant matching the host order.
  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly store if no byte swap is needed and the platform supports
  // unaligned accesses for the access.  (Such support is assumed for integer
  // types.)
  if (noSwap && (!Scalar::isFloatingType(writeType) ||
                 MacroAssembler::SupportsFastUnalignedFPAccesses())) {
    dest.match([&](const auto& dest) {
      StoreToTypedArray(masm, writeType, value, dest, temp, volatileRegs);
    });
    return;
  }

  // Load the value into a gpr register.
  switch (writeType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      if (value->isConstant()) {
        masm.move32(Imm32(ToInt32(value)), temp);
      } else {
        masm.move32(ToRegister(value), temp);
      }
      break;
    case Scalar::Float16: {
      // Float values are reinterpreted as raw bits in a GPR for swapping.
      FloatRegister fvalue = ToFloatRegister(value);
      masm.moveFloat16ToGPR(fvalue, temp, volatileRegs);
      break;
    }
    case Scalar::Float32: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.moveFloat32ToGPR(fvalue, temp);
      break;
    }
    case Scalar::Float64: {
      FloatRegister fvalue = ToFloatRegister(value);
      masm.moveDoubleToGPR64(fvalue, temp64);
      break;
    }
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      // 1-byte types never need swapping and BigInt types are handled by
      // visitStoreDataViewElement64.
      MOZ_CRASH("Invalid typed array type");
  }

  if (!noSwap) {
    // Swap the bytes in the loaded value.
    Label skip;
    if (!littleEndian->isConstant()) {
      // Dynamic endianness: skip the swap if the requested order matches.
      masm.branch32(
          MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
          ToRegister(littleEndian), Imm32(0), &skip);
    }

    switch (writeType) {
      case Scalar::Int16:
        masm.byteSwap16SignExtend(temp);
        break;
      case Scalar::Uint16:
      case Scalar::Float16:
        masm.byteSwap16ZeroExtend(temp);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.byteSwap32(temp);
        break;
      case Scalar::Float64:
        masm.byteSwap64(temp64);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
      default:
        MOZ_CRASH("Invalid typed array type");
    }

    if (skip.used()) {
      masm.bind(&skip);
    }
  }

  // Store the value into the destination.
  dest.match([&](const auto& dest) {
    switch (writeType) {
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Float16:
        masm.store16Unaligned(temp, dest);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
      case Scalar::Float32:
        masm.store32Unaligned(temp, dest);
        break;
      case Scalar::Float64:
        masm.store64Unaligned(temp64, dest);
        break;
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      case Scalar::BigInt64:
      case Scalar::BigUint64:
      default:
        MOZ_CRASH("Invalid typed array type");
    }
  });
}
  19947 
// Store a 64-bit (BigInt64/BigUint64) element into a DataView, byte-swapping
// when the requested endianness differs from the host's.
void CodeGenerator::visitStoreDataViewElement64(LStoreDataViewElement64* lir) {
  Register elements = ToRegister(lir->elements());
  LInt64Allocation value = lir->value();
  const LAllocation* littleEndian = lir->littleEndian();
  Register64 temp = ToTempRegister64OrInvalid(lir->temp0());

  MOZ_ASSERT(Scalar::isBigIntType(lir->mir()->writeType()));

  // DataView indices are byte offsets, hence Scalar::Uint8 scaling.
  auto dest = ToAddressOrBaseIndex(elements, lir->index(), Scalar::Uint8);

  // No swap needed when the endianness is a constant matching the host order.
  bool noSwap = littleEndian->isConstant() &&
                ToBoolean(littleEndian) == MOZ_LITTLE_ENDIAN();

  // Directly store if no byte swap is needed and the platform supports
  // unaligned accesses for the access.  (Such support is assumed for integer
  // types.)
  if (noSwap) {
    dest.match(
        [&](const auto& dest) { StoreToTypedBigIntArray(masm, value, dest); });
    return;
  }

  // Get the value into a mutable register: a temp if available, otherwise
  // the value register itself, spilled to the stack so it can be restored.
  Register64 valueReg = Register64::Invalid();
  if (IsConstant(value)) {
    MOZ_ASSERT(temp != Register64::Invalid());
    masm.move64(Imm64(ToInt64(value)), temp);
  } else {
    valueReg = ToRegister64(value);

    // Preserve the input value.
    if (temp != Register64::Invalid()) {
      masm.move64(valueReg, temp);
    } else {
      masm.Push(valueReg);
      temp = valueReg;
    }
  }

  // Swap the bytes in the loaded value.
  Label skip;
  if (!littleEndian->isConstant()) {
    // Dynamic endianness: skip the swap if the requested order matches.
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  ToRegister(littleEndian), Imm32(0), &skip);
  }

  masm.byteSwap64(temp);

  if (skip.used()) {
    masm.bind(&skip);
  }

  // Store the value into the destination.
  dest.match([&](const auto& dest) { masm.store64Unaligned(temp, dest); });

  // Restore |value| if it was modified.
  if (valueReg == temp) {
    masm.Pop(valueReg);
  }
}
  20007 
// Store into a typed array, silently skipping the write when the index is out
// of bounds (out-of-bounds typed-array stores are no-ops, not errors).
void CodeGenerator::visitStoreTypedArrayElementHole(
    LStoreTypedArrayElementHole* lir) {
  Register elements = ToRegister(lir->elements());
  const LAllocation* value = lir->value();

  Scalar::Type arrayType = lir->mir()->arrayType();

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  // Some write types need a runtime call (see
  // MacroAssembler::StoreRequiresCall), which clobbers volatile registers.
  LiveRegisterSet volatileRegs;
  if (MacroAssembler::StoreRequiresCall(arrayType)) {
    volatileRegs = liveVolatileRegs(lir);
  }

  // Spectre-safe bounds check; out-of-bounds indices jump past the store.
  Label skip;
  if (length->isGeneralReg()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), temp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), temp, &skip);
  }

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  StoreToTypedArray(masm, arrayType, value, dest, temp, volatileRegs);

  masm.bind(&skip);
}
  20036 
// 64-bit (BigInt64/BigUint64) variant of visitStoreTypedArrayElementHole:
// out-of-bounds stores are skipped, in-bounds stores write the 64-bit value.
void CodeGenerator::visitStoreTypedArrayElementHoleInt64(
    LStoreTypedArrayElementHoleInt64* lir) {
  Register elements = ToRegister(lir->elements());
  LInt64Allocation value = lir->value();

  Scalar::Type arrayType = lir->mir()->arrayType();
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));

  Register index = ToRegister(lir->index());
  const LAllocation* length = lir->length();
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp0());

  // Spectre-safe bounds check; out-of-bounds indices jump past the store.
  Label skip;
  if (length->isGeneralReg()) {
    masm.spectreBoundsCheckPtr(index, ToRegister(length), spectreTemp, &skip);
  } else {
    masm.spectreBoundsCheckPtr(index, ToAddress(length), spectreTemp, &skip);
  }

  BaseIndex dest(elements, index, ScaleFromScalarType(arrayType));
  StoreToTypedBigIntArray(masm, value, dest);

  masm.bind(&skip);
}
  20061 
// Emit a memory fence of the kind requested by the LIR node.
void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
  masm.memoryBarrier(ins->barrier());
}
  20065 
  20066 void CodeGenerator::visitAtomicIsLockFree(LAtomicIsLockFree* lir) {
  20067  Register value = ToRegister(lir->value());
  20068  Register output = ToRegister(lir->output());
  20069 
  20070  masm.atomicIsLockFreeJS(value, output);
  20071 }
  20072 
// Atomics.pause: emit the platform's spin-loop pause hint.
void CodeGenerator::visitAtomicPause(LAtomicPause* lir) { masm.atomicPause(); }
  20074 
  20075 void CodeGenerator::visitClampIToUint8(LClampIToUint8* lir) {
  20076  Register output = ToRegister(lir->output());
  20077  MOZ_ASSERT(output == ToRegister(lir->input()));
  20078  masm.clampIntToUint8(output);
  20079 }
  20080 
  20081 void CodeGenerator::visitClampDToUint8(LClampDToUint8* lir) {
  20082  FloatRegister input = ToFloatRegister(lir->input());
  20083  Register output = ToRegister(lir->output());
  20084  masm.clampDoubleToUint8(input, output);
  20085 }
  20086 
// Clamp an arbitrary boxed Value into the uint8 range. Strings are converted
// to numbers through an out-of-line VM call; values the inline path can't
// handle bail out.
void CodeGenerator::visitClampVToUint8(LClampVToUint8* lir) {
  ValueOperand operand = ToValue(lir->input());
  FloatRegister tempFloat = ToFloatRegister(lir->temp0());
  Register output = ToRegister(lir->output());

  // OOL path: the unboxed string is passed in |output|, and the resulting
  // double is stored into |tempFloat| before rejoining.
  using Fn = bool (*)(JSContext*, JSString*, double*);
  OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
      lir, ArgList(output), StoreFloatRegisterTo(tempFloat));
  Label* stringEntry = oolString->entry();
  Label* stringRejoin = oolString->rejoin();

  Label fails;
  masm.clampValueToUint8(operand, stringEntry, stringRejoin, output, tempFloat,
                         output, &fails);

  bailoutFrom(&fails, lir->snapshot());
}
  20104 
  20105 void CodeGenerator::visitInCache(LInCache* ins) {
  20106  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  20107 
  20108  ConstantOrRegister key =
  20109      toConstantOrRegister(ins, LInCache::LhsIndex, ins->mir()->key()->type());
  20110  Register object = ToRegister(ins->rhs());
  20111  Register output = ToRegister(ins->output());
  20112  Register temp = ToRegister(ins->temp0());
  20113 
  20114  IonInIC cache(liveRegs, key, object, output, temp);
  20115  addIC(ins, allocateIC(cache));
  20116 }
  20117 
void CodeGenerator::visitInArray(LInArray* lir) {
  // Implements |index in array| over dense elements: true iff the index is
  // below the initialized length and the element is not a hole.
  const MInArray* mir = lir->mir();
  Register elements = ToRegister(lir->elements());
  Register initLength = ToRegister(lir->initLength());
  Register output = ToRegister(lir->output());

  Label falseBranch, done, trueBranch;

  if (lir->index()->isConstant()) {
    int32_t index = ToInt32(lir->index());

    // A statically-known negative index always deopts to a more general path.
    if (index < 0) {
      MOZ_ASSERT(mir->needsNegativeIntCheck());
      bailout(lir->snapshot());
      return;
    }

    // Unsigned compare: initLength <= index means out of bounds -> false.
    masm.branch32(Assembler::BelowOrEqual, initLength, Imm32(index),
                  &falseBranch);

    NativeObject::elementsSizeMustNotOverflow();
    Address address = Address(elements, index * sizeof(Value));
    // A magic value marks a hole; holes answer false.
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);
  } else {
    Register index = ToRegister(lir->index());

    // The unsigned bounds check also fails for negative indices, so when a
    // negative-int check is required, route the failure through an extra
    // label to distinguish "negative" (bailout) from "too large" (false).
    Label negativeIntCheck;
    Label* failedInitLength = &falseBranch;
    if (mir->needsNegativeIntCheck()) {
      failedInitLength = &negativeIntCheck;
    }

    masm.branch32(Assembler::BelowOrEqual, initLength, index, failedInitLength);

    BaseObjectElementIndex address(elements, index);
    masm.branchTestMagic(Assembler::Equal, address, &falseBranch);

    if (mir->needsNegativeIntCheck()) {
      masm.jump(&trueBranch);
      masm.bind(&negativeIntCheck);

      // Bail out for a negative index; otherwise it was merely out of bounds.
      bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());

      masm.jump(&falseBranch);
    }
  }

  masm.bind(&trueBranch);
  masm.move32(Imm32(1), output);
  masm.jump(&done);

  masm.bind(&falseBranch);
  masm.move32(Imm32(0), output);
  masm.bind(&done);
}
  20173 
  20174 void CodeGenerator::visitGuardElementNotHole(LGuardElementNotHole* lir) {
  20175  Register elements = ToRegister(lir->elements());
  20176  const LAllocation* index = lir->index();
  20177 
  20178  auto source = ToAddressOrBaseObjectElementIndex(elements, index);
  20179 
  20180  Label testMagic;
  20181  source.match([&](const auto& source) {
  20182    masm.branchTestMagic(Assembler::Equal, source, &testMagic);
  20183  });
  20184  bailoutFrom(&testMagic, lir->snapshot());
  20185 }
  20186 
  20187 void CodeGenerator::visitInstanceOfO(LInstanceOfO* ins) {
  20188  Register protoReg = ToRegister(ins->rhs());
  20189  emitInstanceOf(ins, protoReg);
  20190 }
  20191 
  20192 void CodeGenerator::visitInstanceOfV(LInstanceOfV* ins) {
  20193  Register protoReg = ToRegister(ins->rhs());
  20194  emitInstanceOf(ins, protoReg);
  20195 }
  20196 
void CodeGenerator::emitInstanceOf(LInstruction* ins, Register protoReg) {
  // Shared body for LInstanceOfO/LInstanceOfV. This path implements
  // fun_hasInstance when the function's prototype is known to be the object
  // in protoReg. The boolean result is left in the instruction's def(0).

  Label done;
  Register output = ToRegister(ins->getDef(0));

  // If the lhs is a primitive, the result is false.
  Register objReg;
  if (ins->isInstanceOfV()) {
    Label isObject;
    ValueOperand lhsValue = ToValue(ins->toInstanceOfV()->lhs());
    masm.branchTestObject(Assembler::Equal, lhsValue, &isObject);
    masm.mov(ImmWord(0), output);
    masm.jump(&done);
    masm.bind(&isObject);
    objReg = masm.extractObject(lhsValue, output);
  } else {
    objReg = ToRegister(ins->toInstanceOfO()->lhs());
  }

  // Crawl the lhs's prototype chain in a loop to search for prototypeObject.
  // This follows the main loop of js::IsPrototypeOf, though additionally breaks
  // out of the loop on Proxy::LazyProto.

  // Load the lhs's prototype.
  masm.loadObjProto(objReg, output);

  Label testLazy;
  {
    Label loopPrototypeChain;
    masm.bind(&loopPrototypeChain);

    // Test for the target prototype object.
    Label notPrototypeObject;
    masm.branchPtr(Assembler::NotEqual, output, protoReg, &notPrototypeObject);
    masm.mov(ImmWord(1), output);
    masm.jump(&done);
    masm.bind(&notPrototypeObject);

    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

    // Test for nullptr or Proxy::LazyProto: a single unsigned <= 1 compare
    // covers both; the lazy case is distinguished below at |testLazy|.
    masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), &testLazy);

    // Load the current object's prototype.
    masm.loadObjProto(output, output);

    masm.jump(&loopPrototypeChain);
  }

  // Make a VM call if an object with a lazy proto was found on the prototype
  // chain. This currently occurs only for cross compartment wrappers, which
  // we do not expect to be compared with non-wrapper functions from this
  // compartment. Otherwise, we stopped on a nullptr prototype and the output
  // register is already correct.

  using Fn = bool (*)(JSContext*, HandleObject, JSObject*, bool*);
  auto* ool = oolCallVM<Fn, IsPrototypeOf>(ins, ArgList(protoReg, objReg),
                                           StoreRegisterTo(output));

  // Regenerate the original lhs object for the VM call: the loop above walked
  // through |output|, which may alias objReg. When they don't alias, the OOL
  // entry can be used directly; otherwise re-extract the lhs first.
  Label regenerate, *lazyEntry;
  if (objReg != output) {
    lazyEntry = ool->entry();
  } else {
    masm.bind(&regenerate);
    lazyEntry = &regenerate;
    if (ins->isInstanceOfV()) {
      ValueOperand lhsValue = ToValue(ins->toInstanceOfV()->lhs());
      objReg = masm.extractObject(lhsValue, output);
    } else {
      objReg = ToRegister(ins->toInstanceOfO()->lhs());
    }
    MOZ_ASSERT(objReg == output);
    masm.jump(ool->entry());
  }

  masm.bind(&testLazy);
  masm.branchPtr(Assembler::Equal, output, ImmWord(1), lazyEntry);

  masm.bind(&done);
  masm.bind(ool->rejoin());
}
  20281 
  20282 void CodeGenerator::visitInstanceOfCache(LInstanceOfCache* ins) {
  20283  // The Lowering ensures that RHS is an object, and that LHS is a value.
  20284  LiveRegisterSet liveRegs = ins->safepoint()->liveRegs();
  20285  TypedOrValueRegister lhs = TypedOrValueRegister(ToValue(ins->obj()));
  20286  Register rhs = ToRegister(ins->proto());
  20287  Register output = ToRegister(ins->output());
  20288 
  20289  IonInstanceOfIC ic(liveRegs, lhs, rhs, output);
  20290  addIC(ins, allocateIC(ic));
  20291 }
  20292 
void CodeGenerator::visitGetDOMProperty(LGetDOMProperty* ins) {
  // Call a DOM getter through its JSJitGetterOp. The call runs under a fake
  // exit frame so the GC and exception machinery can unwind through it.
  const Register JSContextReg = ToRegister(ins->temp0());
  const Register ObjectReg = ToRegister(ins->object());
  const Register PrivateReg = ToRegister(ins->temp1());
  const Register ValueReg = ToRegister(ins->temp2());

  // Fast path: if the value may be cached in a slot, load it and skip the
  // call whenever the slot holds something other than undefined.
  Label haveValue;
  if (ins->mir()->valueMayBeInSlot()) {
    size_t slot = ins->mir()->domMemberSlotIndex();
    // It's a bit annoying to redo these slot calculations, which duplicate
    // LSlots and a few other things like that, but I'm not sure there's a
    // way to reuse those here.
    //
    // If this ever gets fixed to work with proxies (by not assuming that
    // reserved slot indices, which is what domMemberSlotIndex() returns,
    // match fixed slot indices), we can reenable MGetDOMProperty for
    // proxies in IonBuilder.
    if (slot < NativeObject::MAX_FIXED_SLOTS) {
      masm.loadValue(Address(ObjectReg, NativeObject::getFixedSlotOffset(slot)),
                     JSReturnOperand);
    } else {
      // It's a dynamic slot.
      slot -= NativeObject::MAX_FIXED_SLOTS;
      // Use PrivateReg as a scratch register for the slots pointer.
      masm.loadPtr(Address(ObjectReg, NativeObject::offsetOfSlots()),
                   PrivateReg);
      masm.loadValue(Address(PrivateReg, slot * sizeof(js::Value)),
                     JSReturnOperand);
    }
    masm.branchTestUndefined(Assembler::NotEqual, JSReturnOperand, &haveValue);
  }

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Make space for the outparam.  Pre-initialize it to UndefinedValue so we
  // can trace it at GC time.
  masm.Push(UndefinedValue());
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitGetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  // Switch into the getter's realm if it differs from the current one.
  Realm* getterRealm = ins->mir()->getterRealm();
  if (gen->realm->realmPtr() != getterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(getterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMGetter);

  markSafepointAt(safepointOffset, ins);

  // ABI call: fun(cx, obj, private, args).
  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitGetterOp>(ins->mir()->fun()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  if (ins->mir()->isInfallible()) {
    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  } else {
    // Fallible getters return false on failure; propagate the exception.
    masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

    masm.loadValue(Address(masm.getStackPointer(),
                           IonDOMExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);
  }

  // Switch back to the current realm if needed. Note: if the getter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != getterRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "Clobbering ReturnReg should not affect the return value");
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  // Until C++ code is instrumented against Spectre, prevent speculative
  // execution from returning any private data.
  if (JitOptions.spectreJitToCxxCalls && ins->mir()->hasLiveDefUses()) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  masm.bind(&haveValue);

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
  20400 
  20401 void CodeGenerator::visitGetDOMMemberV(LGetDOMMemberV* ins) {
  20402  // It's simpler to duplicate visitLoadFixedSlotV here than it is to try to
  20403  // use an LLoadFixedSlotV or some subclass of it for this case: that would
  20404  // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
  20405  // we'd have to duplicate a bunch of stuff we now get for free from
  20406  // MGetDOMProperty.
  20407  //
  20408  // If this ever gets fixed to work with proxies (by not assuming that
  20409  // reserved slot indices, which is what domMemberSlotIndex() returns,
  20410  // match fixed slot indices), we can reenable MGetDOMMember for
  20411  // proxies in IonBuilder.
  20412  Register object = ToRegister(ins->object());
  20413  size_t slot = ins->mir()->domMemberSlotIndex();
  20414  ValueOperand result = ToOutValue(ins);
  20415 
  20416  masm.loadValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
  20417                 result);
  20418 }
  20419 
  20420 void CodeGenerator::visitGetDOMMemberT(LGetDOMMemberT* ins) {
  20421  // It's simpler to duplicate visitLoadFixedSlotT here than it is to try to
  20422  // use an LLoadFixedSlotT or some subclass of it for this case: that would
  20423  // require us to have MGetDOMMember inherit from MLoadFixedSlot, and then
  20424  // we'd have to duplicate a bunch of stuff we now get for free from
  20425  // MGetDOMProperty.
  20426  //
  20427  // If this ever gets fixed to work with proxies (by not assuming that
  20428  // reserved slot indices, which is what domMemberSlotIndex() returns,
  20429  // match fixed slot indices), we can reenable MGetDOMMember for
  20430  // proxies in IonBuilder.
  20431  Register object = ToRegister(ins->object());
  20432  size_t slot = ins->mir()->domMemberSlotIndex();
  20433  AnyRegister result = ToAnyRegister(ins->output());
  20434  MIRType type = ins->mir()->type();
  20435 
  20436  masm.loadUnboxedValue(Address(object, NativeObject::getFixedSlotOffset(slot)),
  20437                        type, result);
  20438 }
  20439 
void CodeGenerator::visitSetDOMProperty(LSetDOMProperty* ins) {
  // Call a DOM setter through its JSJitSetterOp. The call runs under a fake
  // exit frame so the GC and exception machinery can unwind through it.
  const Register JSContextReg = ToRegister(ins->temp0());
  const Register ObjectReg = ToRegister(ins->object());
  const Register PrivateReg = ToRegister(ins->temp1());
  const Register ValueReg = ToRegister(ins->temp2());

  DebugOnly<uint32_t> initialStack = masm.framePushed();

  masm.checkStackAlignment();

  // Push the argument. Rooting will happen at GC time.
  ValueOperand argVal = ToValue(ins->value());
  masm.Push(argVal);
  // We pass the pointer to our out param as an instance of
  // JSJitGetterCallArgs, since on the binary level it's the same thing.
  static_assert(sizeof(JSJitSetterCallArgs) == sizeof(Value*));
  masm.moveStackPtrTo(ValueReg);

  masm.Push(ObjectReg);

  LoadDOMPrivate(masm, ObjectReg, PrivateReg, ins->mir()->objectKind());

  // Rooting will happen at GC time.
  masm.moveStackPtrTo(ObjectReg);

  // Switch into the setter's realm if it differs from the current one.
  Realm* setterRealm = ins->mir()->setterRealm();
  if (gen->realm->realmPtr() != setterRealm) {
    // We use JSContextReg as scratch register here.
    masm.switchToRealm(setterRealm, JSContextReg);
  }

  uint32_t safepointOffset = masm.buildFakeExitFrame(JSContextReg);
  masm.loadJSContext(JSContextReg);
  masm.enterFakeExitFrame(JSContextReg, JSContextReg,
                          ExitFrameType::IonDOMSetter);

  markSafepointAt(safepointOffset, ins);

  // ABI call: fun(cx, obj, private, args).
  masm.setupAlignedABICall();
  masm.loadJSContext(JSContextReg);
  masm.passABIArg(JSContextReg);
  masm.passABIArg(ObjectReg);
  masm.passABIArg(PrivateReg);
  masm.passABIArg(ValueReg);
  ensureOsiSpace();
  masm.callWithABI(DynamicFunction<JSJitSetterOp>(ins->mir()->fun()),
                   ABIType::General,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Setters return false on failure; propagate the pending exception.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Switch back to the current realm if needed. Note: if the setter threw an
  // exception, the exception handler will do this.
  if (gen->realm->realmPtr() != setterRealm) {
    masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
  }

  masm.adjustStack(IonDOMExitFrameLayout::Size());

  MOZ_ASSERT(masm.framePushed() == initialStack);
}
  20501 
  20502 void CodeGenerator::visitLoadDOMExpandoValue(LLoadDOMExpandoValue* ins) {
  20503  Register proxy = ToRegister(ins->proxy());
  20504  ValueOperand out = ToOutValue(ins);
  20505 
  20506  masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
  20507               out.scratchReg());
  20508  masm.loadValue(Address(out.scratchReg(),
  20509                         js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
  20510                 out);
  20511 }
  20512 
  20513 void CodeGenerator::visitLoadDOMExpandoValueGuardGeneration(
  20514    LLoadDOMExpandoValueGuardGeneration* ins) {
  20515  Register proxy = ToRegister(ins->proxy());
  20516  ValueOperand out = ToOutValue(ins);
  20517 
  20518  Label bail;
  20519  masm.loadDOMExpandoValueGuardGeneration(proxy, out,
  20520                                          ins->mir()->expandoAndGeneration(),
  20521                                          ins->mir()->generation(), &bail);
  20522  bailoutFrom(&bail, ins->snapshot());
  20523 }
  20524 
  20525 void CodeGenerator::visitLoadDOMExpandoValueIgnoreGeneration(
  20526    LLoadDOMExpandoValueIgnoreGeneration* ins) {
  20527  Register proxy = ToRegister(ins->proxy());
  20528  ValueOperand out = ToOutValue(ins);
  20529 
  20530  masm.loadPtr(Address(proxy, ProxyObject::offsetOfReservedSlots()),
  20531               out.scratchReg());
  20532 
  20533  // Load the ExpandoAndGeneration* from the PrivateValue.
  20534  masm.loadPrivate(
  20535      Address(out.scratchReg(),
  20536              js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
  20537      out.scratchReg());
  20538 
  20539  // Load expandoAndGeneration->expando into the output Value register.
  20540  masm.loadValue(
  20541      Address(out.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), out);
  20542 }
  20543 
  20544 void CodeGenerator::visitGuardDOMExpandoMissingOrGuardShape(
  20545    LGuardDOMExpandoMissingOrGuardShape* ins) {
  20546  Register temp = ToRegister(ins->temp0());
  20547  ValueOperand input = ToValue(ins->expando());
  20548 
  20549  Label done;
  20550  masm.branchTestUndefined(Assembler::Equal, input, &done);
  20551 
  20552  masm.debugAssertIsObject(input);
  20553  masm.unboxObject(input, temp);
  20554  // The expando object is not used in this case, so we don't need Spectre
  20555  // mitigations.
  20556  Label bail;
  20557  masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, temp,
  20558                                              ins->mir()->shape(), &bail);
  20559  bailoutFrom(&bail, ins->snapshot());
  20560 
  20561  masm.bind(&done);
  20562 }
  20563 
// Out-of-line fallback for the isCallable fast path: calls ObjectIsCallable
// via an aligned ABI call, saving and restoring volatile registers around it
// (|output| excluded, since it receives the result).
void CodeGenerator::emitIsCallableOOL(Register object, Register output) {
  saveVolatile(output);
  using Fn = bool (*)(JSObject* obj);
  masm.setupAlignedABICall();
  masm.passABIArg(object);
  masm.callWithABI<Fn, ObjectIsCallable>();
  masm.storeCallBoolResult(output);
  restoreVolatile(output);
}
  20573 
  20574 void CodeGenerator::visitIsCallableO(LIsCallableO* ins) {
  20575  Register object = ToRegister(ins->object());
  20576  Register output = ToRegister(ins->output());
  20577 
  20578  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  20579    emitIsCallableOOL(object, output);
  20580    masm.jump(ool.rejoin());
  20581  });
  20582  addOutOfLineCode(ool, ins->mir());
  20583 
  20584  masm.isCallable(object, output, ool->entry());
  20585 
  20586  masm.bind(ool->rejoin());
  20587 }
  20588 
  20589 void CodeGenerator::visitIsCallableV(LIsCallableV* ins) {
  20590  ValueOperand val = ToValue(ins->object());
  20591  Register output = ToRegister(ins->output());
  20592  Register temp = ToRegister(ins->temp0());
  20593 
  20594  Label notObject;
  20595  masm.fallibleUnboxObject(val, temp, &notObject);
  20596 
  20597  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  20598    emitIsCallableOOL(temp, output);
  20599    masm.jump(ool.rejoin());
  20600  });
  20601  addOutOfLineCode(ool, ins->mir());
  20602 
  20603  masm.isCallable(temp, output, ool->entry());
  20604  masm.jump(ool->rejoin());
  20605 
  20606  masm.bind(&notObject);
  20607  masm.move32(Imm32(0), output);
  20608 
  20609  masm.bind(ool->rejoin());
  20610 }
  20611 
  20612 void CodeGenerator::visitIsConstructor(LIsConstructor* ins) {
  20613  Register object = ToRegister(ins->object());
  20614  Register output = ToRegister(ins->output());
  20615 
  20616  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  20617    saveVolatile(output);
  20618    using Fn = bool (*)(JSObject* obj);
  20619    masm.setupAlignedABICall();
  20620    masm.passABIArg(object);
  20621    masm.callWithABI<Fn, ObjectIsConstructor>();
  20622    masm.storeCallBoolResult(output);
  20623    restoreVolatile(output);
  20624    masm.jump(ool.rejoin());
  20625  });
  20626  addOutOfLineCode(ool, ins->mir());
  20627 
  20628  masm.isConstructor(object, output, ool->entry());
  20629 
  20630  masm.bind(ool->rejoin());
  20631 }
  20632 
  20633 void CodeGenerator::visitIsCrossRealmArrayConstructor(
  20634    LIsCrossRealmArrayConstructor* ins) {
  20635  Register object = ToRegister(ins->object());
  20636  Register output = ToRegister(ins->output());
  20637 
  20638  masm.setIsCrossRealmArrayConstructor(object, output);
  20639 }
  20640 
// Sets |output| to 1 if |obj| is an ArrayObject and 0 otherwise. Proxies are
// dispatched to |ool| (a VM call whose result rejoins here). |notArray|, when
// supplied, is an external entry that also produces a 0 result. Note that
// |output| temporarily holds the object's class pointer.
static void EmitObjectIsArray(MacroAssembler& masm, OutOfLineCode* ool,
                              Register obj, Register output,
                              Label* notArray = nullptr) {
  masm.loadObjClassUnsafe(obj, output);

  Label isArray;
  masm.branchPtr(Assembler::Equal, output, ImmPtr(&ArrayObject::class_),
                 &isArray);

  // Branch to OOL path if it's a proxy.
  masm.branchTestClassIsProxy(true, output, ool->entry());

  if (notArray) {
    masm.bind(notArray);
  }
  masm.move32(Imm32(0), output);
  masm.jump(ool->rejoin());

  masm.bind(&isArray);
  masm.move32(Imm32(1), output);

  masm.bind(ool->rejoin());
}
  20664 
  20665 void CodeGenerator::visitIsArrayO(LIsArrayO* lir) {
  20666  Register object = ToRegister(lir->object());
  20667  Register output = ToRegister(lir->output());
  20668 
  20669  using Fn = bool (*)(JSContext*, HandleObject, bool*);
  20670  OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
  20671      lir, ArgList(object), StoreRegisterTo(output));
  20672  EmitObjectIsArray(masm, ool, object, output);
  20673 }
  20674 
  20675 void CodeGenerator::visitIsArrayV(LIsArrayV* lir) {
  20676  ValueOperand val = ToValue(lir->value());
  20677  Register output = ToRegister(lir->output());
  20678  Register temp = ToRegister(lir->temp0());
  20679 
  20680  Label notArray;
  20681  masm.fallibleUnboxObject(val, temp, &notArray);
  20682 
  20683  using Fn = bool (*)(JSContext*, HandleObject, bool*);
  20684  OutOfLineCode* ool = oolCallVM<Fn, js::IsArrayFromJit>(
  20685      lir, ArgList(temp), StoreRegisterTo(output));
  20686  EmitObjectIsArray(masm, ool, temp, output, &notArray);
  20687 }
  20688 
void CodeGenerator::visitIsTypedArray(LIsTypedArray* lir) {
  // Sets |output| to whether |object| is a typed array. When the object may
  // be a wrapper, a VM call checks the wrapped target too.
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  OutOfLineCode* ool = nullptr;
  if (lir->mir()->isPossiblyWrapped()) {
    using Fn = bool (*)(JSContext*, JSObject*, bool*);
    ool = oolCallVM<Fn, jit::IsPossiblyWrappedTypedArray>(
        lir, ArgList(object), StoreRegisterTo(output));
  }

  Label notTypedArray;
  Label done;

  // |output| temporarily holds the object's class pointer.
  masm.loadObjClassUnsafe(object, output);
  masm.branchIfClassIsNotTypedArray(output, &notTypedArray);

  masm.move32(Imm32(1), output);
  masm.jump(&done);
  masm.bind(&notTypedArray);
  if (ool) {
    // Proxies in the Wrapper family go to the OOL VM call; all other
    // non-typed-array objects answer false.
    Label notProxy;
    masm.branchTestClassIsProxy(false, output, &notProxy);
    masm.branchTestProxyHandlerFamily(Assembler::Equal, object, output,
                                      &Wrapper::family, ool->entry());
    masm.bind(&notProxy);
  }
  masm.move32(Imm32(0), output);
  masm.bind(&done);
  if (ool) {
    masm.bind(ool->rejoin());
  }
}
  20722 
  20723 void CodeGenerator::visitIsObject(LIsObject* ins) {
  20724  Register output = ToRegister(ins->output());
  20725  ValueOperand value = ToValue(ins->object());
  20726  masm.testObjectSet(Assembler::Equal, value, output);
  20727 }
  20728 
  20729 void CodeGenerator::visitIsObjectAndBranch(LIsObjectAndBranch* ins) {
  20730  ValueOperand value = ToValue(ins->input());
  20731 
  20732  MBasicBlock* ifTrue = ins->ifTrue();
  20733  MBasicBlock* ifFalse = ins->ifFalse();
  20734 
  20735  if (isNextBlock(ifFalse->lir())) {
  20736    masm.branchTestObject(Assembler::Equal, value,
  20737                          getJumpLabelForBranch(ifTrue));
  20738  } else {
  20739    masm.branchTestObject(Assembler::NotEqual, value,
  20740                          getJumpLabelForBranch(ifFalse));
  20741    jumpToBlock(ifTrue);
  20742  }
  20743 }
  20744 
  20745 void CodeGenerator::visitIsNullOrUndefined(LIsNullOrUndefined* ins) {
  20746  Register output = ToRegister(ins->output());
  20747  ValueOperand value = ToValue(ins->value());
  20748 
  20749  Label isNotNull, done;
  20750  masm.branchTestNull(Assembler::NotEqual, value, &isNotNull);
  20751 
  20752  masm.move32(Imm32(1), output);
  20753  masm.jump(&done);
  20754 
  20755  masm.bind(&isNotNull);
  20756  masm.testUndefinedSet(Assembler::Equal, value, output);
  20757 
  20758  masm.bind(&done);
  20759 }
  20760 
void CodeGenerator::visitIsNullOrUndefinedAndBranch(
    LIsNullOrUndefinedAndBranch* ins) {
  // Branch to ifTrue when the value is null or undefined, else to ifFalse.
  Label* ifTrue = getJumpLabelForBranch(ins->ifTrue());
  Label* ifFalse = getJumpLabelForBranch(ins->ifFalse());
  ValueOperand value = ToValue(ins->input());

  // Extract the tag once so both tests can reuse it.
  ScratchTagScope tag(masm, value);
  masm.splitTagForTest(value, tag);

  masm.branchTestNull(Assembler::Equal, tag, ifTrue);
  masm.branchTestUndefined(Assembler::Equal, tag, ifTrue);

  // Fall through to ifFalse when it is the next block; otherwise jump.
  if (!isNextBlock(ins->ifFalse()->lir())) {
    masm.jump(ifFalse);
  }
}
  20777 
  20778 void CodeGenerator::visitHasClass(LHasClass* ins) {
  20779  Register lhs = ToRegister(ins->lhs());
  20780  Register output = ToRegister(ins->output());
  20781 
  20782  masm.loadObjClassUnsafe(lhs, output);
  20783  masm.cmpPtrSet(Assembler::Equal, output, ImmPtr(ins->mir()->getClass()),
  20784                 output);
  20785 }
  20786 
  20787 void CodeGenerator::visitHasShape(LHasShape* ins) {
  20788  Register obj = ToRegister(ins->object());
  20789  Register output = ToRegister(ins->output());
  20790 
  20791  // Note: no Spectre mitigations are needed here because this shape check only
  20792  // affects correctness.
  20793  masm.loadObjShapeUnsafe(obj, output);
  20794  masm.cmpPtrSet(Assembler::Equal, output, ImmGCPtr(ins->mir()->shape()),
  20795                 output);
  20796 }
  20797 
  20798 void CodeGenerator::visitGuardToClass(LGuardToClass* ins) {
  20799  Register lhs = ToRegister(ins->lhs());
  20800  Register temp = ToRegister(ins->temp0());
  20801 
  20802  // branchTestObjClass may zero the object register on speculative paths
  20803  // (we should have a defineReuseInput allocation in this case).
  20804  Register spectreRegToZero = lhs;
  20805 
  20806  Label notEqual;
  20807 
  20808  masm.branchTestObjClass(Assembler::NotEqual, lhs, ins->mir()->getClass(),
  20809                          temp, spectreRegToZero, &notEqual);
  20810 
  20811  // Can't return null-return here, so bail.
  20812  bailoutFrom(&notEqual, ins->snapshot());
  20813 }
  20814 
  20815 void CodeGenerator::visitGuardToFunction(LGuardToFunction* ins) {
  20816  Register lhs = ToRegister(ins->lhs());
  20817  Register temp = ToRegister(ins->temp0());
  20818 
  20819  // branchTestObjClass may zero the object register on speculative paths
  20820  // (we should have a defineReuseInput allocation in this case).
  20821  Register spectreRegToZero = lhs;
  20822 
  20823  Label notEqual;
  20824 
  20825  masm.branchTestObjIsFunction(Assembler::NotEqual, lhs, temp, spectreRegToZero,
  20826                               &notEqual);
  20827 
  20828  // Can't return null-return here, so bail.
  20829  bailoutFrom(&notEqual, ins->snapshot());
  20830 }
  20831 
  20832 void CodeGenerator::visitObjectClassToString(LObjectClassToString* lir) {
  20833  Register obj = ToRegister(lir->object());
  20834  Register temp = ToRegister(lir->temp0());
  20835 
  20836  using Fn = JSString* (*)(JSContext*, JSObject*);
  20837  masm.setupAlignedABICall();
  20838  masm.loadJSContext(temp);
  20839  masm.passABIArg(temp);
  20840  masm.passABIArg(obj);
  20841  masm.callWithABI<Fn, js::ObjectClassToString>();
  20842 
  20843  bailoutCmpPtr(Assembler::Equal, ReturnReg, ImmWord(0), lir->snapshot());
  20844 }
  20845 
  20846 void CodeGenerator::visitWasmParameter(LWasmParameter* lir) {}
  20847 
  20848 void CodeGenerator::visitWasmParameterI64(LWasmParameterI64* lir) {}
  20849 
  20850 void CodeGenerator::visitWasmReturn(LWasmReturn* lir) {
  20851  //  Don't emit a jump to the return label if this is the last block.
  20852  if (current->mir() != *gen->graph().poBegin() || current->isOutOfLine()) {
  20853    masm.jump(&returnLabel_);
  20854  }
  20855 }
  20856 
  20857 void CodeGenerator::visitWasmReturnI64(LWasmReturnI64* lir) {
  20858  // Don't emit a jump to the return label if this is the last block.
  20859  if (current->mir() != *gen->graph().poBegin() || current->isOutOfLine()) {
  20860    masm.jump(&returnLabel_);
  20861  }
  20862 }
  20863 
  20864 void CodeGenerator::visitWasmReturnVoid(LWasmReturnVoid* lir) {
  20865  // Don't emit a jump to the return label if this is the last block.
  20866  if (current->mir() != *gen->graph().poBegin() || current->isOutOfLine()) {
  20867    masm.jump(&returnLabel_);
  20868  }
  20869 }
  20870 
// Debug-build range assertion for an integer register: crashes via
// assumeUnreachable if the runtime value escapes the range-analysis bounds.
// Only bounds that are tighter than the full int32 range are checked.
void CodeGenerator::emitAssertRangeI(MIRType type, const Range* r,
                                     Register input) {
  // Check the lower bound.
  if (r->hasInt32LowerBound() && r->lower() > INT32_MIN) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::GreaterThanOrEqual, input, Imm32(r->lower()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }

  // Check the upper bound.
  if (r->hasInt32UpperBound() && r->upper() < INT32_MAX) {
    Label success;
    if (type == MIRType::Int32 || type == MIRType::Boolean) {
      masm.branch32(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                    &success);
    } else {
      MOZ_ASSERT(type == MIRType::IntPtr);
      masm.branchPtr(Assembler::LessThanOrEqual, input, Imm32(r->upper()),
                     &success);
    }
    masm.assumeUnreachable(
        "Integer input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // For r->canHaveFractionalPart(), r->canBeNegativeZero(), and
  // r->exponent(), there's nothing to check, because if we ended up in the
  // integer range checking code, the value is already in an integer register
  // in the integer range.
}
  20910 
// Emits debug checks that the double in |input| lies within the asserted
// range |r|.  |temp| is clobbered with constants used for the comparisons.
// Checks: int32 lower/upper bounds (NaN passes when the range allows it),
// negative zero, and the bounds implied by the maximum exponent; any
// violation hits an assumeUnreachable.
void CodeGenerator::emitAssertRangeD(const Range* r, FloatRegister input,
                                     FloatRegister temp) {
  // Check the lower bound.
  if (r->hasInt32LowerBound()) {
    Label success;
    masm.loadConstantDouble(r->lower(), temp);
    if (r->canBeNaN()) {
      // NaN compares unordered against everything; let it through when the
      // range permits NaN.
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &success);
    masm.assumeUnreachable(
        "Double input should be equal or higher than Lowerbound.");
    masm.bind(&success);
  }
  // Check the upper bound.
  if (r->hasInt32UpperBound()) {
    Label success;
    masm.loadConstantDouble(r->upper(), temp);
    if (r->canBeNaN()) {
      masm.branchDouble(Assembler::DoubleUnordered, input, input, &success);
    }
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp, &success);
    masm.assumeUnreachable(
        "Double input should be lower or equal than Upperbound.");
    masm.bind(&success);
  }

  // This code does not yet check r->canHaveFractionalPart(). This would require
  // new assembler interfaces to make rounding instructions available.

  if (!r->canBeNegativeZero()) {
    Label success;

    // First, test for being equal to 0.0, which also includes -0.0.
    masm.loadConstantDouble(0.0, temp);
    masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, input, temp,
                      &success);

    // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 is
    // -Infinity instead of Infinity.
    masm.loadConstantDouble(1.0, temp);
    masm.divDouble(input, temp);
    masm.branchDouble(Assembler::DoubleGreaterThan, temp, input, &success);

    masm.assumeUnreachable("Input shouldn't be negative zero.");

    masm.bind(&success);
  }

  if (!r->hasInt32Bounds() && !r->canBeInfiniteOrNaN() &&
      r->exponent() < FloatingPoint<double>::kExponentBias) {
    // Check the bounds implied by the maximum exponent.
    Label exponentLoOk;
    masm.loadConstantDouble(pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentLoOk);
    masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, temp,
                      &exponentLoOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentLoOk);

    Label exponentHiOk;
    masm.loadConstantDouble(-pow(2.0, r->exponent() + 1), temp);
    masm.branchDouble(Assembler::DoubleUnordered, input, input, &exponentHiOk);
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input, temp,
                      &exponentHiOk);
    masm.assumeUnreachable("Check for exponent failed.");
    masm.bind(&exponentHiOk);
  } else if (!r->hasInt32Bounds() && !r->canBeNaN()) {
    // If we think the value can't be NaN, check that it isn't.
    Label notnan;
    masm.branchDouble(Assembler::DoubleOrdered, input, input, &notnan);
    masm.assumeUnreachable("Input shouldn't be NaN.");
    masm.bind(&notnan);

    // If we think the value also can't be an infinity, check that it isn't.
    if (!r->canBeInfiniteOrNaN()) {
      Label notposinf;
      masm.loadConstantDouble(PositiveInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleLessThan, input, temp, &notposinf);
      masm.assumeUnreachable("Input shouldn't be +Inf.");
      masm.bind(&notposinf);

      Label notneginf;
      masm.loadConstantDouble(NegativeInfinity<double>(), temp);
      masm.branchDouble(Assembler::DoubleGreaterThan, input, temp, &notneginf);
      masm.assumeUnreachable("Input shouldn't be -Inf.");
      masm.bind(&notneginf);
    }
  }
}
  21002 
  21003 void CodeGenerator::visitAssertClass(LAssertClass* ins) {
  21004  Register obj = ToRegister(ins->input());
  21005  Register temp = ToRegister(ins->temp0());
  21006 
  21007  Label success;
  21008  if (ins->mir()->getClass() == &FunctionClass) {
  21009    // Allow both possible function classes here.
  21010    masm.branchTestObjIsFunctionNoSpectreMitigations(Assembler::Equal, obj,
  21011                                                     temp, &success);
  21012  } else {
  21013    masm.branchTestObjClassNoSpectreMitigations(
  21014        Assembler::Equal, obj, ins->mir()->getClass(), temp, &success);
  21015  }
  21016  masm.assumeUnreachable("Wrong KnownClass during run-time");
  21017  masm.bind(&success);
  21018 }
  21019 
  21020 void CodeGenerator::visitAssertShape(LAssertShape* ins) {
  21021  Register obj = ToRegister(ins->input());
  21022 
  21023  Label success;
  21024  masm.branchTestObjShapeNoSpectreMitigations(Assembler::Equal, obj,
  21025                                              ins->mir()->shape(), &success);
  21026  masm.assumeUnreachable("Wrong Shape during run-time");
  21027  masm.bind(&success);
  21028 }
  21029 
  21030 void CodeGenerator::visitAssertRangeI(LAssertRangeI* ins) {
  21031  Register input = ToRegister(ins->input());
  21032  const Range* r = ins->mir()->assertedRange();
  21033 
  21034  emitAssertRangeI(ins->mir()->input()->type(), r, input);
  21035 }
  21036 
  21037 void CodeGenerator::visitAssertRangeD(LAssertRangeD* ins) {
  21038  FloatRegister input = ToFloatRegister(ins->input());
  21039  FloatRegister temp = ToFloatRegister(ins->temp0());
  21040  const Range* r = ins->mir()->assertedRange();
  21041 
  21042  emitAssertRangeD(r, input, temp);
  21043 }
  21044 
  21045 void CodeGenerator::visitAssertRangeF(LAssertRangeF* ins) {
  21046  FloatRegister input = ToFloatRegister(ins->input());
  21047  FloatRegister temp = ToFloatRegister(ins->temp0());
  21048  FloatRegister temp2 = ToFloatRegister(ins->temp1());
  21049 
  21050  const Range* r = ins->mir()->assertedRange();
  21051 
  21052  masm.convertFloat32ToDouble(input, temp);
  21053  emitAssertRangeD(r, temp, temp2);
  21054 }
  21055 
// Debug-only range assertion for a boxed Value.  Unboxes int32 and double
// payloads and dispatches to the matching scalar range checker; any other
// value tag falls through to assumeUnreachable.
void CodeGenerator::visitAssertRangeV(LAssertRangeV* ins) {
  const Range* r = ins->mir()->assertedRange();
  ValueOperand value = ToValue(ins->input());
  Label done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    {
      Label isNotInt32;
      masm.branchTestInt32(Assembler::NotEqual, tag, &isNotInt32);
      {
        // Release the tag scratch while the payload is unboxed and checked.
        ScratchTagScopeRelease _(&tag);
        Register unboxInt32 = ToTempUnboxRegister(ins->temp0());
        Register input = masm.extractInt32(value, unboxInt32);
        emitAssertRangeI(MIRType::Int32, r, input);
        masm.jump(&done);
      }
      masm.bind(&isNotInt32);
    }

    {
      Label isNotDouble;
      masm.branchTestDouble(Assembler::NotEqual, tag, &isNotDouble);
      {
        // Release the tag scratch while the payload is unboxed and checked.
        ScratchTagScopeRelease _(&tag);
        FloatRegister input = ToFloatRegister(ins->temp1());
        FloatRegister temp = ToFloatRegister(ins->temp2());
        masm.unboxDouble(value, input);
        emitAssertRangeD(r, input, temp);
        masm.jump(&done);
      }
      masm.bind(&isNotDouble);
    }
  }

  // Neither int32 nor double: the asserted range cannot hold.
  masm.assumeUnreachable("Incorrect range for Value.");
  masm.bind(&done);
}
  21096 
  21097 void CodeGenerator::visitInterruptCheck(LInterruptCheck* lir) {
  21098  using Fn = bool (*)(JSContext*);
  21099  OutOfLineCode* ool =
  21100      oolCallVM<Fn, InterruptCheck>(lir, ArgList(), StoreNothing());
  21101 
  21102  const void* interruptAddr = gen->runtime->addressOfInterruptBits();
  21103  masm.branch32(Assembler::NotEqual, AbsoluteAddress(interruptAddr), Imm32(0),
  21104                ool->entry());
  21105  masm.bind(ool->rejoin());
  21106 }
  21107 
  21108 void CodeGenerator::visitWasmInterruptCheck(LWasmInterruptCheck* lir) {
  21109  MOZ_ASSERT(gen->compilingWasm());
  21110 
  21111  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  21112    emitResumableWasmTrapOOL(lir, masm.framePushed(),
  21113                             lir->mir()->trapSiteDesc(),
  21114                             wasm::Trap::CheckInterrupt);
  21115    masm.jump(ool.rejoin());
  21116  });
  21117  addOutOfLineCode(ool, lir->mir());
  21118  masm.branch32(
  21119      Assembler::NotEqual,
  21120      Address(ToRegister(lir->instance()), wasm::Instance::offsetOfInterrupt()),
  21121      Imm32(0), ool->entry());
  21122  masm.bind(ool->rejoin());
  21123 }
  21124 
  21125 void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
  21126  MOZ_ASSERT(gen->compilingWasm());
  21127  const MWasmTrap* mir = lir->mir();
  21128 
  21129  masm.wasmTrap(mir->trap(), mir->trapSiteDesc());
  21130 }
  21131 
  21132 void CodeGenerator::visitWasmRefAsNonNull(LWasmRefAsNonNull* lir) {
  21133  MOZ_ASSERT(gen->compilingWasm());
  21134  const MWasmRefAsNonNull* mir = lir->mir();
  21135  Label nonNull;
  21136  Register ref = ToRegister(lir->ref());
  21137 
  21138  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  21139    masm.wasmTrap(wasm::Trap::NullPointerDereference, mir->trapSiteDesc());
  21140  });
  21141  addOutOfLineCode(ool, mir);
  21142  masm.branchWasmAnyRefIsNull(true, ref, ool->entry());
  21143 }
  21144 
  21145 void CodeGenerator::visitWasmRefTestAbstract(LWasmRefTestAbstract* ins) {
  21146  MOZ_ASSERT(gen->compilingWasm());
  21147 
  21148  const MWasmRefTestAbstract* mir = ins->mir();
  21149  MOZ_ASSERT(!mir->destType().isTypeRef());
  21150 
  21151  Register ref = ToRegister(ins->ref());
  21152  Register superSTV = Register::Invalid();
  21153  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  21154  Register scratch2 = Register::Invalid();
  21155  Register result = ToRegister(ins->output());
  21156  Label onSuccess;
  21157  Label onFail;
  21158  Label join;
  21159  masm.branchWasmRefIsSubtype(ref, mir->ref()->wasmRefType(), mir->destType(),
  21160                              &onSuccess,
  21161                              /*onSuccess=*/true, /*signalNullChecks=*/false,
  21162                              superSTV, scratch1, scratch2);
  21163  masm.bind(&onFail);
  21164  masm.xor32(result, result);
  21165  masm.jump(&join);
  21166  masm.bind(&onSuccess);
  21167  masm.move32(Imm32(1), result);
  21168  masm.bind(&join);
  21169 }
  21170 
  21171 void CodeGenerator::visitWasmRefTestConcrete(LWasmRefTestConcrete* ins) {
  21172  MOZ_ASSERT(gen->compilingWasm());
  21173 
  21174  const MWasmRefTestConcrete* mir = ins->mir();
  21175  MOZ_ASSERT(mir->destType().isTypeRef());
  21176 
  21177  Register ref = ToRegister(ins->ref());
  21178  Register superSTV = ToRegister(ins->superSTV());
  21179  Register scratch1 = ToRegister(ins->temp0());
  21180  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  21181  Register result = ToRegister(ins->output());
  21182  Label onSuccess;
  21183  Label join;
  21184  masm.branchWasmRefIsSubtype(ref, mir->ref()->wasmRefType(), mir->destType(),
  21185                              &onSuccess,
  21186                              /*onSuccess=*/true, /*signalNullChecks=*/false,
  21187                              superSTV, scratch1, scratch2);
  21188  masm.move32(Imm32(0), result);
  21189  masm.jump(&join);
  21190  masm.bind(&onSuccess);
  21191  masm.move32(Imm32(1), result);
  21192  masm.bind(&join);
  21193 }
  21194 
  21195 void CodeGenerator::visitWasmRefTestAbstractAndBranch(
  21196    LWasmRefTestAbstractAndBranch* ins) {
  21197  MOZ_ASSERT(gen->compilingWasm());
  21198  Register ref = ToRegister(ins->ref());
  21199  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  21200  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  21201  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  21202  masm.branchWasmRefIsSubtype(ref, ins->sourceType(), ins->destType(),
  21203                              onSuccess, /*onSuccess=*/true,
  21204                              /*signalNullChecks=*/false, Register::Invalid(),
  21205                              scratch1, Register::Invalid());
  21206  masm.jump(onFail);
  21207 }
  21208 
  21209 void CodeGenerator::visitWasmRefTestConcreteAndBranch(
  21210    LWasmRefTestConcreteAndBranch* ins) {
  21211  MOZ_ASSERT(gen->compilingWasm());
  21212  Register ref = ToRegister(ins->ref());
  21213  Register superSTV = ToRegister(ins->superSTV());
  21214  Register scratch1 = ToRegister(ins->temp0());
  21215  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  21216  Label* onSuccess = getJumpLabelForBranch(ins->ifTrue());
  21217  Label* onFail = getJumpLabelForBranch(ins->ifFalse());
  21218  masm.branchWasmRefIsSubtype(
  21219      ref, ins->sourceType(), ins->destType(), onSuccess, /*onSuccess=*/true,
  21220      /*signalNullChecks=*/false, superSTV, scratch1, scratch2);
  21221  masm.jump(onFail);
  21222 }
  21223 
  21224 void CodeGenerator::visitWasmRefCastAbstract(LWasmRefCastAbstract* ins) {
  21225  MOZ_ASSERT(gen->compilingWasm());
  21226 
  21227  const MWasmRefCastAbstract* mir = ins->mir();
  21228  MOZ_ASSERT(!mir->destType().isTypeRef());
  21229 
  21230  Register ref = ToRegister(ins->ref());
  21231  Register superSTV = Register::Invalid();
  21232  Register scratch1 = ToTempRegisterOrInvalid(ins->temp0());
  21233  Register scratch2 = Register::Invalid();
  21234  MOZ_ASSERT(ref == ToRegister(ins->output()));
  21235  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  21236    masm.wasmTrap(wasm::Trap::BadCast, mir->trapSiteDesc());
  21237  });
  21238  addOutOfLineCode(ool, ins->mir());
  21239  FaultingCodeOffset fco = masm.branchWasmRefIsSubtype(
  21240      ref, mir->ref()->wasmRefType(), mir->destType(), ool->entry(),
  21241      /*onSuccess=*/false, /*signalNullChecks=*/true, superSTV, scratch1,
  21242      scratch2);
  21243  if (fco.isValid()) {
  21244    masm.append(wasm::Trap::BadCast, wasm::TrapMachineInsnForLoadWord(),
  21245                fco.get(), mir->trapSiteDesc());
  21246  }
  21247 }
  21248 
  21249 void CodeGenerator::visitWasmRefCastConcrete(LWasmRefCastConcrete* ins) {
  21250  MOZ_ASSERT(gen->compilingWasm());
  21251 
  21252  const MWasmRefCastConcrete* mir = ins->mir();
  21253  MOZ_ASSERT(mir->destType().isTypeRef());
  21254 
  21255  Register ref = ToRegister(ins->ref());
  21256  Register superSTV = ToRegister(ins->superSTV());
  21257  Register scratch1 = ToRegister(ins->temp0());
  21258  Register scratch2 = ToTempRegisterOrInvalid(ins->temp1());
  21259  MOZ_ASSERT(ref == ToRegister(ins->output()));
  21260  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  21261    masm.wasmTrap(wasm::Trap::BadCast, mir->trapSiteDesc());
  21262  });
  21263  addOutOfLineCode(ool, ins->mir());
  21264  FaultingCodeOffset fco = masm.branchWasmRefIsSubtype(
  21265      ref, mir->ref()->wasmRefType(), mir->destType(), ool->entry(),
  21266      /*onSuccess=*/false, /*signalNullChecks=*/true, superSTV, scratch1,
  21267      scratch2);
  21268  if (fco.isValid()) {
  21269    masm.append(wasm::Trap::BadCast, wasm::TrapMachineInsnForLoadWord(),
  21270                fco.get(), mir->trapSiteDesc());
  21271  }
  21272 }
  21273 
// Calls one of the StructNew* instance functions to allocate a wasm struct,
// leaving the result pointer in |output|.  |typeDefIndex| and |allocSite| are
// passed through to the callee.  A null result means the callee failed (and
// already reported the error), in which case we trap.
void CodeGenerator::callWasmStructAllocFun(
    LInstruction* lir, wasm::SymbolicAddress fun, Register typeDefIndex,
    Register allocSite, Register output,
    const wasm::TrapSiteDesc& trapSiteDesc) {
  // Only the four struct-allocation builtins are supported, and all of them
  // signal failure by returning a null pointer.
  MOZ_ASSERT(fun == wasm::SymbolicAddress::StructNewIL_true ||
             fun == wasm::SymbolicAddress::StructNewIL_false ||
             fun == wasm::SymbolicAddress::StructNewOOL_true ||
             fun == wasm::SymbolicAddress::StructNewOOL_false);
  MOZ_ASSERT(wasm::SASigStructNewIL_true.failureMode ==
             wasm::FailureMode::FailOnNullPtr);
  MOZ_ASSERT(wasm::SASigStructNewIL_false.failureMode ==
             wasm::FailureMode::FailOnNullPtr);
  MOZ_ASSERT(wasm::SASigStructNewOOL_true.failureMode ==
             wasm::FailureMode::FailOnNullPtr);
  MOZ_ASSERT(wasm::SASigStructNewOOL_false.failureMode ==
             wasm::FailureMode::FailOnNullPtr);

  // Preserve the instance register across the call, and remember the frame
  // depth at that point for the safepoint's stack-map base.
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();
  saveLive(lir);

  masm.setupWasmABICall(fun);
  masm.passABIArg(InstanceReg);
  masm.passABIArg(typeDefIndex);
  masm.passABIArg(allocSite);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  CodeOffset offset =
      masm.callWithABI(trapSiteDesc.bytecodeOffset, fun,
                       mozilla::Some(instanceOffset), ABIType::General);
  masm.storeCallPointerResult(output);

  // Record a safepoint at the call instruction.
  markSafepointAt(offset.offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);

  restoreLive(lir);
  masm.Pop(InstanceReg);
#if JS_CODEGEN_ARM64
  // ARM64: re-sync the stack pointer after the pop.
  masm.syncStackPtr();
#endif

  // Null result => allocation failed; trap (the error was already reported).
  masm.wasmTrapOnFailedInstanceCall(output, wasm::FailureMode::FailOnNullPtr,
                                    wasm::Trap::ThrowReported, trapSiteDesc);
}
  21318 
// Allocates a wasm struct object.  The inline-storage case first attempts an
// in-jit allocation and only calls the instance function on failure; the
// outline-storage case always goes through the instance call.
void CodeGenerator::visitWasmNewStructObject(LWasmNewStructObject* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  MWasmNewStructObject* mir = lir->mir();
  uint32_t typeDefIndex = wasmCodeMeta()->types->indexOf(mir->typeDef());

  Register allocSite = ToRegister(lir->allocSite());
  Register output = ToRegister(lir->output());
  Register temp = ToRegister(lir->temp0());

  if (mir->isOutline()) {
    // Out-of-line storage: always allocate via the instance call.  The
    // *_true/_false suffix selects whether the callee zeroes the fields.
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewOOL_true
                                    : wasm::SymbolicAddress::StructNewOOL_false;

    masm.move32(Imm32(typeDefIndex), temp);
    callWasmStructAllocFun(lir, fun, temp, allocSite, output,
                           mir->trapSiteDesc());
  } else {
    wasm::SymbolicAddress fun = mir->zeroFields()
                                    ? wasm::SymbolicAddress::StructNewIL_true
                                    : wasm::SymbolicAddress::StructNewIL_false;

    Register instance = ToRegister(lir->instance());
    MOZ_ASSERT(instance == InstanceReg);

    // Fallback path taken when the inline allocation below fails.
    auto* ool =
        new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
          masm.move32(Imm32(typeDefIndex), temp);
          callWasmStructAllocFun(lir, fun, temp, allocSite, output,
                                 mir->trapSiteDesc());
          masm.jump(ool.rejoin());
        });
    addOutOfLineCode(ool, lir->mir());

    // Locate this type's TypeDef instance data inside the instance.
    size_t offsetOfTypeDefData = wasm::Instance::offsetInData(
        wasmCodeMeta()->offsetOfTypeDefInstanceData(typeDefIndex));
    masm.wasmNewStructObject(instance, output, allocSite, temp,
                             offsetOfTypeDefData, ool->entry(),
                             mir->allocKind(), mir->zeroFields());

    masm.bind(ool->rejoin());
  }
}
  21363 
// Calls one of the ArrayNew_* instance functions to allocate a wasm array,
// leaving the result pointer in |output|.  |numElements|, |typeDefIndex| and
// |allocSite| are passed through to the callee.  A null result means the
// callee failed (and already reported the error), in which case we trap.
void CodeGenerator::callWasmArrayAllocFun(
    LInstruction* lir, wasm::SymbolicAddress fun, Register numElements,
    Register typeDefIndex, Register allocSite, Register output,
    const wasm::TrapSiteDesc& trapSiteDesc) {
  // Only the two array-allocation builtins are supported; both signal
  // failure by returning a null pointer.
  MOZ_ASSERT(fun == wasm::SymbolicAddress::ArrayNew_true ||
             fun == wasm::SymbolicAddress::ArrayNew_false);
  MOZ_ASSERT(wasm::SASigArrayNew_true.failureMode ==
             wasm::FailureMode::FailOnNullPtr);
  MOZ_ASSERT(wasm::SASigArrayNew_false.failureMode ==
             wasm::FailureMode::FailOnNullPtr);

  // Preserve the instance register across the call, and remember the frame
  // depth at that point for the safepoint's stack-map base.
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();
  saveLive(lir);

  masm.setupWasmABICall(fun);
  masm.passABIArg(InstanceReg);
  masm.passABIArg(numElements);
  masm.passABIArg(typeDefIndex);
  masm.passABIArg(allocSite);
  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  CodeOffset offset =
      masm.callWithABI(trapSiteDesc.bytecodeOffset, fun,
                       mozilla::Some(instanceOffset), ABIType::General);
  masm.storeCallPointerResult(output);

  // Record a safepoint at the call instruction.
  markSafepointAt(offset.offset(), lir);
  lir->safepoint()->setFramePushedAtStackMapBase(framePushedAfterInstance);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::CodegenCall);

  restoreLive(lir);
  masm.Pop(InstanceReg);
#if JS_CODEGEN_ARM64
  // ARM64: re-sync the stack pointer after the pop.
  masm.syncStackPtr();
#endif

  // Null result => allocation failed; trap (the error was already reported).
  masm.wasmTrapOnFailedInstanceCall(output, wasm::FailureMode::FailOnNullPtr,
                                    wasm::Trap::ThrowReported, trapSiteDesc);
}
  21403 
// Allocates a wasm array object.  Three cases:
//  1. constant length too large for inline storage -> direct instance call;
//  2. constant length with inline storage -> in-jit nursery allocation with
//     an instance-call fallback;
//  3. dynamic length -> in-jit nursery allocation with fallback.
void CodeGenerator::visitWasmNewArrayObject(LWasmNewArrayObject* lir) {
  MOZ_ASSERT(gen->compilingWasm());

  MWasmNewArrayObject* mir = lir->mir();
  uint32_t typeDefIndex = wasmCodeMeta()->types->indexOf(mir->typeDef());

  Register allocSite = ToRegister(lir->allocSite());
  Register output = ToRegister(lir->output());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  // The *_true/_false suffix selects whether the callee zeroes the elements.
  wasm::SymbolicAddress fun = mir->zeroFields()
                                  ? wasm::SymbolicAddress::ArrayNew_true
                                  : wasm::SymbolicAddress::ArrayNew_false;

  if (lir->numElements()->isConstant()) {
    // numElements is constant, so we can do optimized code generation.
    uint32_t numElements = lir->numElements()->toConstant()->toInt32();
    CheckedUint32 storageBytes =
        WasmArrayObject::calcStorageBytesChecked(mir->elemSize(), numElements);
    if (!storageBytes.isValid() ||
        storageBytes.value() > WasmArrayObject_MaxInlineBytes) {
      // Too much array data to store inline. Immediately perform an instance
      // call to handle the out-of-line storage (or the trap).
      masm.move32(Imm32(typeDefIndex), temp0);
      masm.move32(Imm32(numElements), temp1);
      callWasmArrayAllocFun(lir, fun, temp1, temp0, allocSite, output,
                            mir->trapSiteDesc());
    } else {
      // storageBytes is small enough to be stored inline in WasmArrayObject.
      // Attempt a nursery allocation and fall back to an instance call if it
      // fails.
      Register instance = ToRegister(lir->instance());
      MOZ_ASSERT(instance == InstanceReg);

      // Fallback path taken when the inline allocation below fails.
      auto* ool =
          new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
            masm.move32(Imm32(typeDefIndex), temp0);
            masm.move32(Imm32(numElements), temp1);
            callWasmArrayAllocFun(lir, fun, temp1, temp0, allocSite, output,
                                  mir->trapSiteDesc());
            masm.jump(ool.rejoin());
          });
      addOutOfLineCode(ool, lir->mir());

      // Locate this type's TypeDef instance data inside the instance.
      size_t offsetOfTypeDefData = wasm::Instance::offsetInData(
          wasmCodeMeta()->offsetOfTypeDefInstanceData(typeDefIndex));
      masm.wasmNewArrayObjectFixed(
          instance, output, allocSite, temp0, temp1, offsetOfTypeDefData,
          ool->entry(), numElements, storageBytes.value(), mir->zeroFields());

      masm.bind(ool->rejoin());
    }
  } else {
    // numElements is dynamic. Attempt a dynamic inline-storage nursery
    // allocation and fall back to an instance call if it fails.
    Register instance = ToRegister(lir->instance());
    MOZ_ASSERT(instance == InstanceReg);
    Register numElements = ToRegister(lir->numElements());

    // Fallback path taken when the inline allocation below fails.
    auto* ool =
        new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
          masm.move32(Imm32(typeDefIndex), temp0);
          callWasmArrayAllocFun(lir, fun, numElements, temp0, allocSite, output,
                                mir->trapSiteDesc());
          masm.jump(ool.rejoin());
        });
    addOutOfLineCode(ool, lir->mir());

    // Locate this type's TypeDef instance data inside the instance.
    size_t offsetOfTypeDefData = wasm::Instance::offsetInData(
        wasmCodeMeta()->offsetOfTypeDefInstanceData(typeDefIndex));
    masm.wasmNewArrayObject(instance, output, numElements, allocSite, temp1,
                            offsetOfTypeDefData, ool->entry(), mir->elemSize(),
                            mir->zeroFields());

    masm.bind(ool->rejoin());
  }
}
  21482 
  21483 void CodeGenerator::visitWasmHeapReg(LWasmHeapReg* ins) {
  21484 #ifdef WASM_HAS_HEAPREG
  21485  masm.movePtr(HeapReg, ToRegister(ins->output()));
  21486 #else
  21487  MOZ_CRASH();
  21488 #endif
  21489 }
  21490 
// Emits the out-of-line portion of a resumable wasm trap: the trap
// instruction itself plus a safepoint recorded at it, so the frame can be
// scanned while execution is suspended at the trap.
void CodeGenerator::emitResumableWasmTrapOOL(
    LInstruction* lir, size_t framePushed,
    const wasm::TrapSiteDesc& trapSiteDesc, wasm::Trap trap) {
  masm.wasmTrap(trap, trapSiteDesc);

  // The safepoint covers the trap instruction just emitted.
  markSafepointAt(masm.currentOffset(), lir);

  // Note that masm.framePushed() doesn't include the register dump area.
  // That will be taken into account when the StackMap is created from the
  // LSafepoint.
  lir->safepoint()->setFramePushedAtStackMapBase(framePushed);
  lir->safepoint()->setWasmSafepointKind(WasmSafepointKind::Trap);
}
  21504 
  21505 void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
  21506  const MWasmBoundsCheck* mir = ins->mir();
  21507 
  21508  Register ptr = ToRegister(ins->ptr());
  21509  if (ins->boundsCheckLimit()->isConstant()) {
  21510    auto* ool =
  21511        new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  21512          masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  21513        });
  21514    addOutOfLineCode(ool, mir);
  21515    masm.branch32(Assembler::AboveOrEqual, ptr,
  21516                  Imm32(ins->boundsCheckLimit()->toConstant()->toInt32()),
  21517                  ool->entry());
  21518    return;
  21519  }
  21520 
  21521  Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
  21522  // When there are no spectre mitigations in place, branching out-of-line to
  21523  // the trap is a big performance win, but with mitigations it's trickier.  See
  21524  // bug 1680243.
  21525  if (JitOptions.spectreIndexMasking) {
  21526    Label ok;
  21527    masm.wasmBoundsCheck32(Assembler::Below, ptr, boundsCheckLimit, &ok);
  21528    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  21529    masm.bind(&ok);
  21530  } else {
  21531    auto* ool =
  21532        new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  21533          masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  21534        });
  21535    addOutOfLineCode(ool, mir);
  21536    masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
  21537                           ool->entry());
  21538  }
  21539 }
  21540 
  21541 void CodeGenerator::visitWasmBoundsCheck64(LWasmBoundsCheck64* ins) {
  21542  const MWasmBoundsCheck* mir = ins->mir();
  21543 
  21544  Register64 ptr = ToRegister64(ins->ptr());
  21545  if (IsConstant(ins->boundsCheckLimit())) {
  21546    auto* ool =
  21547        new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  21548          masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  21549        });
  21550    addOutOfLineCode(ool, mir);
  21551    masm.branch64(Assembler::AboveOrEqual, ptr,
  21552                  Imm64(ToInt64(ins->boundsCheckLimit())), ool->entry());
  21553    return;
  21554  }
  21555 
  21556  Register64 boundsCheckLimit = ToRegister64(ins->boundsCheckLimit());
  21557  // See above.
  21558  if (JitOptions.spectreIndexMasking) {
  21559    Label ok;
  21560    masm.wasmBoundsCheck64(Assembler::Below, ptr, boundsCheckLimit, &ok);
  21561    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  21562    masm.bind(&ok);
  21563  } else {
  21564    auto* ool =
  21565        new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
  21566          masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  21567        });
  21568    addOutOfLineCode(ool, mir);
  21569    masm.wasmBoundsCheck64(Assembler::AboveOrEqual, ptr, boundsCheckLimit,
  21570                           ool->entry());
  21571  }
  21572 }
  21573 
// Emit a 32-bit wasm bounds check against a limit loaded from an instance
// field at ins->offset(); traps with OutOfBounds on failure.
void CodeGenerator::visitWasmBoundsCheckInstanceField(
    LWasmBoundsCheckInstanceField* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  Register instance = ToRegister(ins->instance());
  // See above.
  if (JitOptions.spectreIndexMasking) {
    // Inline trap: branch past it when the index is in bounds.
    Label ok;
    masm.wasmBoundsCheck32(Assembler::Condition::Below, ptr,
                           Address(instance, ins->offset()), &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
    masm.bind(&ok);
  } else {
    // Out-of-line trap: branch to it when the index is out of bounds.
    auto* ool =
        new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
          masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
        });
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck32(Assembler::Condition::AboveOrEqual, ptr,
                           Address(instance, ins->offset()), ool->entry());
  }
}
  21596 
// 64-bit variant of the instance-field bounds check above.
void CodeGenerator::visitWasmBoundsCheckInstanceField64(
    LWasmBoundsCheckInstanceField64* ins) {
  const MWasmBoundsCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
  Register instance = ToRegister(ins->instance());
  // See above.
  if (JitOptions.spectreIndexMasking) {
    // Inline trap: branch past it when the index is in bounds.
    Label ok;
    masm.wasmBoundsCheck64(Assembler::Condition::Below, ptr,
                           Address(instance, ins->offset()), &ok);
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
    masm.bind(&ok);
  } else {
    // Out-of-line trap: branch to it when the index is out of bounds.
    auto* ool =
        new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
          masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
        });
    addOutOfLineCode(ool, mir);
    masm.wasmBoundsCheck64(Assembler::Condition::AboveOrEqual, ptr,
                           Address(instance, ins->offset()), ool->entry());
  }
}
  21619 
// Combined index/length range check: delegates to the masm helper, which
// uses |tmp| as scratch and traps via the MIR node's trap-site descriptor
// when the range is out of bounds.
void CodeGenerator::visitWasmBoundsCheckRange32(LWasmBoundsCheckRange32* ins) {
  const MWasmBoundsCheckRange32* mir = ins->mir();
  Register index = ToRegister(ins->index());
  Register length = ToRegister(ins->length());
  Register limit = ToRegister(ins->limit());
  Register tmp = ToRegister(ins->temp0());

  masm.wasmBoundsCheckRange32(index, length, limit, tmp, mir->trapSiteDesc());
}
  21629 
// Trap with UnalignedAccess when |ptr| is not aligned to the access size.
// The mask (byteSize - 1) assumes byteSize() is a power of two.
void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register ptr = ToRegister(ins->ptr());
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    masm.wasmTrap(wasm::Trap::UnalignedAccess, mir->trapSiteDesc());
  });
  addOutOfLineCode(ool, mir);
  // Any set low bit means the pointer is misaligned.
  masm.branchTest32(Assembler::NonZero, ptr, Imm32(mir->byteSize() - 1),
                    ool->entry());
}
  21640 
// 64-bit variant of the alignment check.  Only the low word can carry
// misaligned low bits, so on 32-bit targets testing ptr.low suffices.
void CodeGenerator::visitWasmAlignmentCheck64(LWasmAlignmentCheck64* ins) {
  const MWasmAlignmentCheck* mir = ins->mir();
  Register64 ptr = ToRegister64(ins->ptr());
#ifdef JS_64BIT
  Register r = ptr.reg;
#else
  Register r = ptr.low;
#endif
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    masm.wasmTrap(wasm::Trap::UnalignedAccess, mir->trapSiteDesc());
  });
  addOutOfLineCode(ool, mir);
  masm.branchTestPtr(Assembler::NonZero, r, Imm32(mir->byteSize() - 1),
                     ool->entry());
}
  21656 
// Load a field of the wasm Instance at the MIR-specified offset, with the
// load width chosen by the result MIRType.
void CodeGenerator::visitWasmLoadInstance(LWasmLoadInstance* ins) {
  switch (ins->mir()->type()) {
    case MIRType::WasmAnyRef:
    case MIRType::Pointer:
      // Pointer-sized load for references and raw pointers.
      masm.loadPtr(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                   ToRegister(ins->output()));
      break;
    case MIRType::Int32:
      masm.load32(Address(ToRegister(ins->instance()), ins->mir()->offset()),
                  ToRegister(ins->output()));
      break;
    default:
      MOZ_CRASH("MIRType not supported in WasmLoadInstance");
  }
}
  21672 
// 64-bit variant of WasmLoadInstance: always loads a full Int64 field.
void CodeGenerator::visitWasmLoadInstance64(LWasmLoadInstance64* ins) {
  MOZ_ASSERT(ins->mir()->type() == MIRType::Int64);
  masm.load64(Address(ToRegister(ins->instance()), ins->mir()->offset()),
              ToOutRegister64(ins));
}
  21678 
// Emit a non-atomic load/add/store that bumps the script's warm-up counter
// at the baked-in absolute address.  |tmp| is clobbered.
void CodeGenerator::incrementWarmUpCounter(AbsoluteAddress warmUpCount,
                                           JSScript* script, Register tmp) {
  // The code depends on the JitScript* not being discarded without also
  // invalidating Ion code. Assert this.
#ifdef DEBUG
  Label ok;
  masm.movePtr(ImmGCPtr(script), tmp);
  masm.loadJitScript(tmp, tmp);
  masm.branchPtr(Assembler::Equal, tmp, ImmPtr(script->jitScript()), &ok);
  masm.assumeUnreachable("Didn't find JitScript?");
  masm.bind(&ok);
#endif

  masm.load32(warmUpCount, tmp);
  masm.add32(Imm32(1), tmp);
  masm.store32(tmp, warmUpCount);
}
  21696 
// Bump the warm-up counter stored in the script's JitScript.
void CodeGenerator::visitIncrementWarmUpCounter(LIncrementWarmUpCounter* ins) {
  Register tmp = ToRegister(ins->temp0());

  // The JitScript pointer is baked into the code as an absolute address;
  // see the assertion in incrementWarmUpCounter for why this is safe.
  AbsoluteAddress warmUpCount =
      AbsoluteAddress(ins->mir()->script()->jitScript())
          .offset(JitScript::offsetOfWarmUpCount());
  incrementWarmUpCounter(warmUpCount, ins->mir()->script(), tmp);
}
  21705 
// Bail out when the input is the JS_UNINITIALIZED_LEXICAL magic value
// (TDZ check for lexical bindings read before initialization).
void CodeGenerator::visitLexicalCheck(LLexicalCheck* ins) {
  ValueOperand inputValue = ToValue(ins->input());
  Label bail;
  masm.branchTestMagicValue(Assembler::Equal, inputValue,
                            JS_UNINITIALIZED_LEXICAL, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
  21713 
// Call into the VM to throw the lexical error identified by the MIR node's
// error number.
void CodeGenerator::visitThrowRuntimeLexicalError(
    LThrowRuntimeLexicalError* ins) {
  pushArg(Imm32(ins->mir()->errorNumber()));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, jit::ThrowRuntimeLexicalError>(ins);
}
  21721 
// Call into the VM to throw the message identified by the ThrowMsgKind.
void CodeGenerator::visitThrowMsg(LThrowMsg* ins) {
  pushArg(Imm32(static_cast<int32_t>(ins->mir()->throwMsgKind())));

  using Fn = bool (*)(JSContext*, unsigned);
  callVM<Fn, js::ThrowMsgOperation>(ins);
}
  21728 
// VM call that instantiates global declarations for the script at the
// resume point's bytecode pc.
void CodeGenerator::visitGlobalDeclInstantiation(
    LGlobalDeclInstantiation* ins) {
  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(ImmPtr(ins->mir()->resumePoint()->pc()));
  pushArg(ImmGCPtr(ins->mir()->block()->info().script()));

  using Fn = bool (*)(JSContext*, HandleScript, const jsbytecode*);
  callVM<Fn, GlobalDeclInstantiationFromIon>(ins);
}
  21737 
// Implements the |debugger| statement: call out via the ABI to ask whether
// a live Debugger wants onDebuggerStatement, and bail out of Ion if so.
void CodeGenerator::visitDebugger(LDebugger* ins) {
  Register cx = ToRegister(ins->temp0());

  masm.loadJSContext(cx);
  using Fn = bool (*)(JSContext* cx);
  masm.setupAlignedABICall();
  masm.passABIArg(cx);
  masm.callWithABI<Fn, GlobalHasLiveOnDebuggerStatement>();

  // A true result means a debugger hook is present: bail so the baseline /
  // interpreter path can run it.
  Label bail;
  masm.branchIfTrueBool(ReturnReg, &bail);
  bailoutFrom(&bail, ins->snapshot());
}
  21751 
// Load |new.target| for the current frame.  When constructing, new.target
// was pushed by the caller just past the actual arguments, i.e. at index
// max(numActualArgs, numFormalArgs); otherwise it is |undefined|.
void CodeGenerator::visitNewTarget(LNewTarget* ins) {
  ValueOperand output = ToOutValue(ins);

  // if (isConstructing) output = argv[Max(numActualArgs, numFormalArgs)]
  Label notConstructing, done;
  Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
  masm.branchTestPtr(Assembler::Zero, calleeToken,
                     Imm32(CalleeToken_FunctionConstructing), &notConstructing);

  Register argvLen = output.scratchReg();
  masm.loadNumActualArgs(FramePointer, argvLen);

  Label useNFormals;

  size_t numFormalArgs = ins->mirRaw()->block()->info().nargs();
  masm.branchPtr(Assembler::Below, argvLen, Imm32(numFormalArgs), &useNFormals);

  size_t argsOffset = JitFrameLayout::offsetOfActualArgs();
  {
    // actuals >= formals: new.target sits right after the actuals.
    BaseValueIndex newTarget(FramePointer, argvLen, argsOffset);
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  masm.bind(&useNFormals);

  {
    // actuals < formals: new.target sits after the formal-argument slots,
    // whose count is known at compile time.
    Address newTarget(FramePointer,
                      argsOffset + (numFormalArgs * sizeof(Value)));
    masm.loadValue(newTarget, output);
    masm.jump(&done);
  }

  // else output = undefined
  masm.bind(&notConstructing);
  masm.moveValue(UndefinedValue(), output);
  masm.bind(&done);
}
  21790 
// Derived-class constructor return check: an object return value is used
// as-is; a non-object, non-undefined return throws; an undefined return
// falls back to |this|, which must not still be the uninitialized magic.
void CodeGenerator::visitCheckReturn(LCheckReturn* ins) {
  ValueOperand returnValue = ToValue(ins->returnValue());
  ValueOperand thisValue = ToValue(ins->thisValue());
  ValueOperand output = ToOutValue(ins);

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowBadDerivedReturnOrUninitializedThis>(
      ins, ArgList(returnValue), StoreNothing());

  Label noChecks;
  masm.branchTestObject(Assembler::Equal, returnValue, &noChecks);
  // Non-object, non-undefined return value: throw.
  masm.branchTestUndefined(Assembler::NotEqual, returnValue, ool->entry());
  // Undefined return with uninitialized |this|: throw.
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.moveValue(thisValue, output);
  masm.jump(ool->rejoin());
  masm.bind(&noChecks);
  masm.moveValue(returnValue, output);
  masm.bind(ool->rejoin());
}
  21810 
// Unbox the value as an object, throwing (via the OOL VM call) when it is
// not an object.
void CodeGenerator::visitCheckIsObj(LCheckIsObj* ins) {
  ValueOperand value = ToValue(ins->value());
  Register output = ToRegister(ins->output());

  using Fn = bool (*)(JSContext*, CheckIsObjectKind);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowCheckIsObject>(
      ins, ArgList(Imm32(ins->mir()->checkKind())), StoreNothing());

  masm.fallibleUnboxObject(value, output, ool->entry());
  masm.bind(ool->rejoin());
}
  21822 
// Throw (via the OOL VM call) when the value is null or undefined, i.e.
// not coercible to an object.
void CodeGenerator::visitCheckObjCoercible(LCheckObjCoercible* ins) {
  ValueOperand checkValue = ToValue(ins->checkValue());

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, ThrowObjectCoercible>(
      ins, ArgList(checkValue), StoreNothing());
  masm.branchTestNull(Assembler::Equal, checkValue, ool->entry());
  masm.branchTestUndefined(Assembler::Equal, checkValue, ool->entry());
  masm.bind(ool->rejoin());
}
  21833 
// Validate a class |extends| heritage: null is always fine; otherwise the
// value must be an object that is a constructor, else the OOL VM call
// throws the appropriate error.
void CodeGenerator::visitCheckClassHeritage(LCheckClassHeritage* ins) {
  ValueOperand heritage = ToValue(ins->heritage());
  Register temp0 = ToRegister(ins->temp0());
  Register temp1 = ToRegister(ins->temp1());

  using Fn = bool (*)(JSContext*, HandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, CheckClassHeritageOperation>(
      ins, ArgList(heritage), StoreNothing());

  masm.branchTestNull(Assembler::Equal, heritage, ool->rejoin());
  masm.fallibleUnboxObject(heritage, temp0, ool->entry());

  // isConstructor leaves a boolean in temp1; zero means "not a constructor".
  masm.isConstructor(temp0, temp1, ool->entry());
  masm.branchTest32(Assembler::Zero, temp1, temp1, ool->entry());

  masm.bind(ool->rejoin());
}
  21851 
// TDZ check for |this| in derived-class constructors: throw when |this| is
// still the uninitialized magic value.
void CodeGenerator::visitCheckThis(LCheckThis* ins) {
  ValueOperand thisValue = ToValue(ins->thisValue());

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowUninitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::Equal, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
  21861 
// Inverse of CheckThis: throw when |this| has already been initialized
// (i.e. is NOT the magic value), guarding against double super() calls.
void CodeGenerator::visitCheckThisReinit(LCheckThisReinit* ins) {
  ValueOperand thisValue = ToValue(ins->thisValue());

  using Fn = bool (*)(JSContext*);
  OutOfLineCode* ool =
      oolCallVM<Fn, ThrowInitializedThis>(ins, ArgList(), StoreNothing());
  masm.branchTestMagic(Assembler::NotEqual, thisValue, ool->entry());
  masm.bind(ool->rejoin());
}
  21871 
// VM call that allocates the generator object for the current frame.
void CodeGenerator::visitGenerator(LGenerator* lir) {
  Register callee = ToRegister(lir->callee());
  Register environmentChain = ToRegister(lir->environmentChain());
  Register argsObject = ToRegister(lir->argsObject());

  // Arguments are pushed in reverse order of the Fn signature.
  pushArg(argsObject);
  pushArg(environmentChain);
  pushArg(ImmGCPtr(current->mir()->info().script()));
  pushArg(callee);

  using Fn = JSObject* (*)(JSContext * cx, HandleFunction, HandleScript,
                           HandleObject, HandleObject);
  callVM<Fn, CreateGenerator>(lir);
}
  21886 
// VM call that resolves an async function's promise with |value|.
void CodeGenerator::visitAsyncResolve(LAsyncResolve* lir) {
  Register generator = ToRegister(lir->generator());
  ValueOperand value = ToValue(lir->value());

  pushArg(value);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue);
  callVM<Fn, js::AsyncFunctionResolve>(lir);
}
  21898 
// VM call that rejects an async function's promise with |reason|, passing
// the captured |stack| along for error reporting.
void CodeGenerator::visitAsyncReject(LAsyncReject* lir) {
  Register generator = ToRegister(lir->generator());
  ValueOperand reason = ToValue(lir->reason());
  ValueOperand stack = ToValue(lir->stack());

  pushArg(stack);
  pushArg(reason);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext*, Handle<AsyncFunctionGeneratorObject*>,
                           HandleValue, HandleValue);
  callVM<Fn, js::AsyncFunctionReject>(lir);
}
  21912 
// VM call implementing |await|: suspends the async function's generator on
// the awaited |value|.
void CodeGenerator::visitAsyncAwait(LAsyncAwait* lir) {
  ValueOperand value = ToValue(lir->value());
  Register generator = ToRegister(lir->generator());

  pushArg(value);
  pushArg(generator);

  using Fn = JSObject* (*)(JSContext * cx,
                           Handle<AsyncFunctionGeneratorObject*> genObj,
                           HandleValue value);
  callVM<Fn, js::AsyncFunctionAwait>(lir);
}
  21925 
// VM call that decides whether an |await| on this value can be optimized
// away (the boolean result lands in the instruction's output).
void CodeGenerator::visitCanSkipAwait(LCanSkipAwait* lir) {
  ValueOperand value = ToValue(lir->value());

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, bool* canSkip);
  callVM<Fn, js::CanSkipAwait>(lir);
}
  21934 
// Companion to CanSkipAwait: when |canSkip| is true, call the VM to extract
// the awaited value (e.g. from a resolved promise); otherwise pass |value|
// through to the output unchanged.
void CodeGenerator::visitMaybeExtractAwaitValue(LMaybeExtractAwaitValue* lir) {
  ValueOperand value = ToValue(lir->value());
  ValueOperand output = ToOutValue(lir);
  Register canSkip = ToRegister(lir->canSkip());

  Label cantExtract, finished;
  masm.branchIfFalseBool(canSkip, &cantExtract);

  pushArg(value);

  using Fn = bool (*)(JSContext*, HandleValue, MutableHandleValue);
  callVM<Fn, js::ExtractAwaitValue>(lir);
  masm.jump(&finished);
  masm.bind(&cantExtract);

  masm.moveValue(value, output);

  masm.bind(&finished);
}
  21954 
// Debug-only VM call verifying a value is permissible inside self-hosted
// code.
void CodeGenerator::visitDebugCheckSelfHosted(LDebugCheckSelfHosted* ins) {
  ValueOperand checkValue = ToValue(ins->checkValue());
  pushArg(checkValue);
  using Fn = bool (*)(JSContext*, HandleValue);
  callVM<Fn, js::Debug_CheckSelfHosted>(ins);
}
  21961 
// Math.random(): generate a double from the realm's XorShift128+ RNG whose
// state address is baked into the code.
void CodeGenerator::visitRandom(LRandom* ins) {
  using mozilla::non_crypto::XorShift128PlusRNG;

  FloatRegister output = ToFloatRegister(ins->output());
  Register rngReg = ToRegister(ins->temp0());

  Register64 temp1 = ToRegister64(ins->temp1());
  Register64 temp2 = ToRegister64(ins->temp2());

  const XorShift128PlusRNG* rng = gen->realm->addressOfRandomNumberGenerator();
  masm.movePtr(ImmPtr(rng), rngReg);

  masm.randomDouble(rngReg, output, temp1, temp2);
  // Differential testing needs deterministic output, so clobber the result
  // with 0.0 (the RNG state was still advanced above).
  if (js::SupportDifferentialTesting()) {
    masm.loadConstantDouble(0.0, output);
  }
}
  21979 
  21980 void CodeGenerator::visitSignExtendInt32(LSignExtendInt32* ins) {
  21981  Register input = ToRegister(ins->input());
  21982  Register output = ToRegister(ins->output());
  21983 
  21984  switch (ins->mir()->mode()) {
  21985    case MSignExtendInt32::Byte:
  21986      masm.move8SignExtend(input, output);
  21987      break;
  21988    case MSignExtendInt32::Half:
  21989      masm.move16SignExtend(input, output);
  21990      break;
  21991  }
  21992 }
  21993 
  21994 void CodeGenerator::visitSignExtendIntPtr(LSignExtendIntPtr* ins) {
  21995  Register input = ToRegister(ins->input());
  21996  Register output = ToRegister(ins->output());
  21997 
  21998  switch (ins->mir()->mode()) {
  21999    case MSignExtendIntPtr::Byte:
  22000      masm.move8SignExtendToPtr(input, output);
  22001      break;
  22002    case MSignExtendIntPtr::Half:
  22003      masm.move16SignExtendToPtr(input, output);
  22004      break;
  22005    case MSignExtendIntPtr::Word:
  22006      masm.move32SignExtendToPtr(input, output);
  22007      break;
  22008  }
  22009 }
  22010 
  22011 void CodeGenerator::visitRotate(LRotate* ins) {
  22012  MRotate* mir = ins->mir();
  22013  Register input = ToRegister(ins->input());
  22014  Register dest = ToRegister(ins->output());
  22015 
  22016  const LAllocation* count = ins->count();
  22017  if (count->isConstant()) {
  22018    int32_t c = ToInt32(count) & 0x1F;
  22019    if (mir->isLeftRotate()) {
  22020      masm.rotateLeft(Imm32(c), input, dest);
  22021    } else {
  22022      masm.rotateRight(Imm32(c), input, dest);
  22023    }
  22024  } else {
  22025    Register creg = ToRegister(count);
  22026    if (mir->isLeftRotate()) {
  22027      masm.rotateLeft(creg, input, dest);
  22028    } else {
  22029      masm.rotateRight(creg, input, dest);
  22030    }
  22031  }
  22032 }
  22033 
// 64-bit rotate left/right.  A constant count of zero degenerates to a
// plain move; otherwise the masked count (low 6 bits) is used.
void CodeGenerator::visitRotateI64(LRotateI64* lir) {
  MRotate* mir = lir->mir();
  const LAllocation* count = lir->count();

  Register64 input = ToRegister64(lir->input());
  Register64 output = ToOutRegister64(lir);
  Register temp = ToTempRegisterOrInvalid(lir->temp0());

  if (count->isConstant()) {
    int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
    if (!c) {
      // Rotation by zero: just copy input to output if they differ.
      if (input != output) {
        masm.move64(input, output);
      }
      return;
    }
    if (mir->isLeftRotate()) {
      masm.rotateLeft64(Imm32(c), input, output, temp);
    } else {
      masm.rotateRight64(Imm32(c), input, output, temp);
    }
  } else {
    if (mir->isLeftRotate()) {
      masm.rotateLeft64(ToRegister(count), input, output, temp);
    } else {
      masm.rotateRight64(ToRegister(count), input, output, temp);
    }
  }
}
  22063 
// Bitwise reinterpretation between 32-bit int and float32 register classes;
// the 64-bit cases are handled by separate LIR opcodes below.
void CodeGenerator::visitReinterpretCast(LReinterpretCast* lir) {
  MReinterpretCast* ins = lir->mir();

  MIRType to = ins->type();
  mozilla::DebugOnly<MIRType> from = ins->input()->type();

  switch (to) {
    case MIRType::Int32:
      MOZ_ASSERT(from == MIRType::Float32);
      masm.moveFloat32ToGPR(ToFloatRegister(lir->input()),
                            ToRegister(lir->output()));
      break;
    case MIRType::Float32:
      MOZ_ASSERT(from == MIRType::Int32);
      masm.moveGPRToFloat32(ToRegister(lir->input()),
                            ToFloatRegister(lir->output()));
      break;
    case MIRType::Double:
    case MIRType::Int64:
      MOZ_CRASH("not handled by this LIR opcode");
    default:
      MOZ_CRASH("unexpected ReinterpretCast");
  }
}
  22088 
// Bitwise reinterpretation of an Int64 as a Double.
void CodeGenerator::visitReinterpretCastFromI64(LReinterpretCastFromI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
  masm.moveGPR64ToDouble(ToRegister64(lir->input()),
                         ToFloatRegister(lir->output()));
}
  22095 
// Bitwise reinterpretation of a Double as an Int64.
void CodeGenerator::visitReinterpretCastToI64(LReinterpretCastToI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
  MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
  masm.moveDoubleToGPR64(ToFloatRegister(lir->input()), ToOutRegister64(lir));
}
  22101 
// Canonicalize NaN (and, unless provably absent, negative zero) to +0.0.
void CodeGenerator::visitNaNToZero(LNaNToZero* lir) {
  FloatRegister input = ToFloatRegister(lir->input());

  // Out-of-line path: overwrite the output with +0.0 and rejoin.
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    FloatRegister output = ToFloatRegister(lir->output());
    masm.loadConstantDouble(0.0, output);
    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, lir->mir());

  if (lir->mir()->operandIsNeverNegativeZero()) {
    // Only NaN needs normalizing: NaN != NaN, so an unordered compare of the
    // input with itself detects it.
    masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
  } else {
    // Catch both NaN (unordered) and -0.0 (compares equal to +0.0).
    FloatRegister scratch = ToFloatRegister(lir->temp0());
    masm.loadConstantDouble(0.0, scratch);
    masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch,
                      ool->entry());
  }
  masm.bind(ool->rejoin());
}
  22122 
  22123 void CodeGenerator::visitIsPackedArray(LIsPackedArray* lir) {
  22124  Register obj = ToRegister(lir->object());
  22125  Register output = ToRegister(lir->output());
  22126  Register temp = ToRegister(lir->temp0());
  22127 
  22128  masm.setIsPackedArray(obj, output, temp);
  22129 }
  22130 
// Bail out when |array| is not a packed (hole-free) array.
void CodeGenerator::visitGuardArrayIsPacked(LGuardArrayIsPacked* lir) {
  Register array = ToRegister(lir->array());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());

  Label bail;
  masm.branchArrayIsNotPacked(array, temp0, temp1, &bail);
  bailoutFrom(&bail, lir->snapshot());
}
  22140 
// Bail out when the elements header has the NON_PACKED flag set.  Unlike
// GuardArrayIsPacked this takes the elements pointer directly.
void CodeGenerator::visitGuardElementsArePacked(LGuardElementsArePacked* lir) {
  Register elements = ToRegister(lir->elements());

  Label bail;
  Address flags(elements, ObjectElements::offsetOfFlags());
  masm.branchTest32(Assembler::NonZero, flags,
                    Imm32(ObjectElements::NON_PACKED), &bail);
  bailoutFrom(&bail, lir->snapshot());
}
  22150 
// Object.getPrototypeOf: a null proto (0) yields |null|, the LazyProto
// sentinel (1) goes through the VM, anything else is tagged as an object.
void CodeGenerator::visitGetPrototypeOf(LGetPrototypeOf* lir) {
  Register target = ToRegister(lir->target());
  ValueOperand out = ToOutValue(lir);
  Register scratch = out.scratchReg();

  using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
  OutOfLineCode* ool = oolCallVM<Fn, jit::GetPrototypeOf>(lir, ArgList(target),
                                                          StoreValueTo(out));

  // The code below bakes in LazyProto == (JSObject*)1.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  masm.loadObjProto(target, scratch);

  Label hasProto;
  masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);

  // Call into the VM for lazy prototypes.
  masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), ool->entry());

  // Remaining case: proto == 0, i.e. a null prototype.
  masm.moveValue(NullValue(), out);
  masm.jump(ool->rejoin());

  masm.bind(&hasProto);
  masm.tagValue(JSVAL_TYPE_OBJECT, scratch, out);

  masm.bind(ool->rejoin());
}
  22178 
// VM call creating a plain object with the given prototype value.
void CodeGenerator::visitObjectWithProto(LObjectWithProto* lir) {
  pushArg(ToValue(lir->prototype()));

  using Fn = PlainObject* (*)(JSContext*, HandleValue);
  callVM<Fn, js::ObjectWithProtoOperation>(lir);
}
  22185 
// Load an object's prototype pointer directly, for objects whose proto is
// statically known to be neither null nor lazy.
void CodeGenerator::visitObjectStaticProto(LObjectStaticProto* lir) {
  Register obj = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  masm.loadObjProto(obj, output);

#ifdef DEBUG
  // We shouldn't encounter a null or lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label done;
  masm.branchPtr(Assembler::Above, output, ImmWord(1), &done);
  masm.assumeUnreachable("Unexpected null or lazy proto in MObjectStaticProto");
  masm.bind(&done);
#endif
}
  22202 
// VM call returning the canonical builtin object for the given kind.
void CodeGenerator::visitBuiltinObject(LBuiltinObject* lir) {
  pushArg(Imm32(static_cast<int32_t>(lir->mir()->builtinObjectKind())));

  using Fn = JSObject* (*)(JSContext*, BuiltinObjectKind);
  callVM<Fn, js::BuiltinObjectOperation>(lir);
}
  22209 
// Load the super-constructor (the callee's prototype) into |dest| for
// JSOp::SuperFun.  |callee| must be a JSFunction, which guarantees the
// proto is never the lazy sentinel; |dest| may still be null.
static void EmitLoadSuperFunction(MacroAssembler& masm, Register callee,
                                  Register dest) {
#ifdef DEBUG
  Label classCheckDone;
  masm.branchTestObjIsFunction(Assembler::Equal, callee, dest, callee,
                               &classCheckDone);
  masm.assumeUnreachable("Unexpected non-JSFunction callee in JSOp::SuperFun");
  masm.bind(&classCheckDone);
#endif

  // Load prototype of callee
  masm.loadObjProto(callee, dest);

#ifdef DEBUG
  // We won't encounter a lazy proto, because |callee| is guaranteed to be a
  // JSFunction and only proxy objects can have a lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  Label proxyCheckDone;
  masm.branchPtr(Assembler::NotEqual, dest, ImmWord(1), &proxyCheckDone);
  masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperFun");
  masm.bind(&proxyCheckDone);
#endif
}
  22234 
// JSOp::SuperFun producing a boxed Value: the callee's proto as an object,
// or |null| when the proto pointer is null.
void CodeGenerator::visitSuperFunction(LSuperFunction* lir) {
  Register callee = ToRegister(lir->callee());
  ValueOperand out = ToOutValue(lir);
  Register temp = out.scratchReg();

  EmitLoadSuperFunction(masm, callee, temp);

  Label nullProto, done;
  masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);

  // Box prototype and return
  masm.tagValue(JSVAL_TYPE_OBJECT, temp, out);
  masm.jump(&done);

  masm.bind(&nullProto);
  masm.moveValue(NullValue(), out);

  masm.bind(&done);
}
  22254 
// Variant of SuperFunction producing an unboxed object register; bails out
// instead of producing |null| when the proto pointer is null.
void CodeGenerator::visitSuperFunctionAndUnbox(LSuperFunctionAndUnbox* lir) {
  Register callee = ToRegister(lir->callee());
  Register output = ToRegister(lir->output());

  EmitLoadSuperFunction(masm, callee, output);

  bailoutCmpPtr(Assembler::Equal, output, ImmWord(0), lir->snapshot());
}
  22263 
// Store a method's [[HomeObject]] into the function's extended slot, with
// the required pre-barrier on the overwritten slot value.
void CodeGenerator::visitInitHomeObject(LInitHomeObject* lir) {
  Register func = ToRegister(lir->function());
  ValueOperand homeObject = ToValue(lir->homeObject());

  masm.assertFunctionIsExtended(func);

  Address addr(func, FunctionExtended::offsetOfMethodHomeObjectSlot());

  emitPreBarrier(addr);
  masm.storeValue(homeObject, addr);
}
  22275 
// Compute a boolean into |output| saying whether |object| is definitely a
// TypedArray constructor.
void CodeGenerator::visitIsTypedArrayConstructor(
    LIsTypedArrayConstructor* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  masm.setIsDefinitelyTypedArrayConstructor(object, output);
}
  22283 
  22284 void CodeGenerator::visitLoadValueTag(LLoadValueTag* lir) {
  22285  ValueOperand value = ToValue(lir->value());
  22286  Register output = ToRegister(lir->output());
  22287 
  22288  Register tag = masm.extractTag(value, output);
  22289  if (tag != output) {
  22290    masm.mov(tag, output);
  22291  }
  22292 }
  22293 
// Guard that two Value tags differ, treating all number tags as equal:
// bail when the tags match, and also when both sides are numbers (int32 vs
// double tags differ yet both are numbers).
void CodeGenerator::visitGuardTagNotEqual(LGuardTagNotEqual* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());

  bailoutCmp32(Assembler::Equal, lhs, rhs, lir->snapshot());

  // If both lhs and rhs are numbers, can't use tag comparison to do inequality
  // comparison
  Label done;
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  bailout(lir->snapshot());

  masm.bind(&done);
}
  22309 
// Load a proxy wrapper's target object from its private reserved slot.
// In the fallible case a non-object slot (revoked proxy) triggers a bailout.
void CodeGenerator::visitLoadWrapperTarget(LLoadWrapperTarget* lir) {
  Register object = ToRegister(lir->object());
  Register output = ToRegister(lir->output());

  masm.loadPtr(Address(object, ProxyObject::offsetOfReservedSlots()), output);

  // Bail for revoked proxies.
  Label bail;
  Address targetAddr(output,
                     js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
  if (lir->mir()->fallible()) {
    masm.fallibleUnboxObject(targetAddr, output, &bail);
    bailoutFrom(&bail, lir->snapshot());
  } else {
    masm.unboxObject(targetAddr, output);
  }
}
  22327 
// Extract the getter or setter JSFunction from a boxed GetterSetter value.
// Bails when the accessor slot is null, or (when a class guard is needed)
// when the accessor is not a JSFunction.
void CodeGenerator::visitLoadGetterSetterFunction(
    LLoadGetterSetterFunction* lir) {
  ValueOperand getterSetter = ToValue(lir->getterSetter());
  Register output = ToRegister(lir->output());

  // The GetterSetter* is stored as a PrivateGCThing value.
  masm.unboxNonDouble(getterSetter, output, JSVAL_TYPE_PRIVATE_GCTHING);

  size_t offset = lir->mir()->isGetter() ? GetterSetter::offsetOfGetter()
                                         : GetterSetter::offsetOfSetter();
  masm.loadPtr(Address(output, offset), output);

  Label bail;
  masm.branchTestPtr(Assembler::Zero, output, output, &bail);
  if (lir->mir()->needsClassGuard()) {
    Register temp = ToRegister(lir->temp0());
    masm.branchTestObjIsFunction(Assembler::NotEqual, output, temp, output,
                                 &bail);
  }

  bailoutFrom(&bail, lir->snapshot());
}
  22349 
// Guard that |object| has the expected GetterSetter for the guarded
// property, via an ABI call to a pure (no-GC) helper; bail when the helper
// returns false.
void CodeGenerator::visitGuardHasGetterSetter(LGuardHasGetterSetter* lir) {
  Register object = ToRegister(lir->object());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register temp2 = ToRegister(lir->temp2());

  masm.movePropertyKey(lir->mir()->propId(), temp1);

  auto getterSetterVal = lir->mir()->getterSetterValue();
  if (getterSetterVal.isValue()) {
    // Tenured GetterSetter: its pointer can be baked in directly.
    auto* gs = getterSetterVal.toValue().toGCThing()->as<GetterSetter>();
    masm.movePtr(ImmGCPtr(gs), temp2);
  } else {
    // Load the GetterSetter* from the Value stored in the IonScript.
    Address valueAddr = getNurseryValueAddress(getterSetterVal, temp2);
    masm.unboxNonDouble(valueAddr, temp2, JSVAL_TYPE_PRIVATE_GCTHING);
  }

  using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
                      GetterSetter* getterSetter);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(object);
  masm.passABIArg(temp1);
  masm.passABIArg(temp2);
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();

  bailoutIfFalseBool(ReturnReg, lir->snapshot());
}
  22380 
  22381 void CodeGenerator::visitGuardIsExtensible(LGuardIsExtensible* lir) {
  22382  Register object = ToRegister(lir->object());
  22383  Register temp = ToRegister(lir->temp0());
  22384 
  22385  Label bail;
  22386  masm.branchIfObjectNotExtensible(object, temp, &bail);
  22387  bailoutFrom(&bail, lir->snapshot());
  22388 }
  22389 
  22390 void CodeGenerator::visitGuardInt32IsNonNegative(
  22391    LGuardInt32IsNonNegative* lir) {
  22392  Register index = ToRegister(lir->index());
  22393 
  22394  bailoutCmp32(Assembler::LessThan, index, Imm32(0), lir->snapshot());
  22395 }
  22396 
  22397 void CodeGenerator::visitGuardIntPtrIsNonNegative(
  22398    LGuardIntPtrIsNonNegative* lir) {
  22399  Register index = ToRegister(lir->index());
  22400 
  22401  bailoutCmpPtr(Assembler::LessThan, index, ImmWord(0), lir->snapshot());
  22402 }
  22403 
  22404 void CodeGenerator::visitGuardInt32Range(LGuardInt32Range* lir) {
  22405  Register input = ToRegister(lir->input());
  22406 
  22407  bailoutCmp32(Assembler::LessThan, input, Imm32(lir->mir()->minimum()),
  22408               lir->snapshot());
  22409  bailoutCmp32(Assembler::GreaterThan, input, Imm32(lir->mir()->maximum()),
  22410               lir->snapshot());
  22411 }
  22412 
void CodeGenerator::visitGuardIndexIsNotDenseElement(
    LGuardIndexIsNotDenseElement* lir) {
  // Bail out when |index| refers to an existing dense element of |object|
  // (i.e. it is below the initialized length and the slot is not a hole).
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  // Load obj->elements.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  // Ensure index >= initLength or the element is a hole.
  Label notDense;
  Address capacity(temp, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, capacity, spectreTemp, &notDense);

  // In-bounds: a magic value in the slot marks a hole, which also counts as
  // "not a dense element".
  BaseObjectElementIndex element(temp, index);
  masm.branchTestMagic(Assembler::Equal, element, &notDense);

  // In-bounds and not a hole: the element is dense, so deoptimize.
  bailout(lir->snapshot());

  masm.bind(&notDense);
}
  22435 
void CodeGenerator::visitGuardIndexIsValidUpdateOrAdd(
    LGuardIndexIsValidUpdateOrAdd* lir) {
  // Guard that writing element |index| of |object| is permitted: either the
  // array length is writable (any index is fine), or the index is within the
  // current length.  Bails out otherwise.
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());
  Register spectreTemp = ToTempRegisterOrInvalid(lir->temp1());

  // Load obj->elements.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp);

  Label success;

  // If length is writable, branch to &success.  All indices are writable.
  Address flags(temp, ObjectElements::offsetOfFlags());
  masm.branchTest32(Assembler::Zero, flags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &success);

  // Otherwise, ensure index is in bounds.
  Label bail;
  Address length(temp, ObjectElements::offsetOfLength());
  masm.spectreBoundsCheck32(index, length, spectreTemp, &bail);
  masm.bind(&success);

  bailoutFrom(&bail, lir->snapshot());
}
  22462 
  22463 void CodeGenerator::visitCallAddOrUpdateSparseElement(
  22464    LCallAddOrUpdateSparseElement* lir) {
  22465  Register object = ToRegister(lir->object());
  22466  Register index = ToRegister(lir->index());
  22467  ValueOperand value = ToValue(lir->value());
  22468 
  22469  pushArg(Imm32(lir->mir()->strict()));
  22470  pushArg(value);
  22471  pushArg(index);
  22472  pushArg(object);
  22473 
  22474  using Fn =
  22475      bool (*)(JSContext*, Handle<NativeObject*>, int32_t, HandleValue, bool);
  22476  callVM<Fn, js::AddOrUpdateSparseElementHelper>(lir);
  22477 }
  22478 
  22479 void CodeGenerator::visitCallGetSparseElement(LCallGetSparseElement* lir) {
  22480  Register object = ToRegister(lir->object());
  22481  Register index = ToRegister(lir->index());
  22482 
  22483  pushArg(index);
  22484  pushArg(object);
  22485 
  22486  using Fn =
  22487      bool (*)(JSContext*, Handle<NativeObject*>, int32_t, MutableHandleValue);
  22488  callVM<Fn, js::GetSparseElementHelper>(lir);
  22489 }
  22490 
  22491 void CodeGenerator::visitCallNativeGetElement(LCallNativeGetElement* lir) {
  22492  Register object = ToRegister(lir->object());
  22493  Register index = ToRegister(lir->index());
  22494 
  22495  pushArg(index);
  22496  pushArg(TypedOrValueRegister(MIRType::Object, AnyRegister(object)));
  22497  pushArg(object);
  22498 
  22499  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
  22500                      MutableHandleValue);
  22501  callVM<Fn, js::NativeGetElement>(lir);
  22502 }
  22503 
  22504 void CodeGenerator::visitCallNativeGetElementSuper(
  22505    LCallNativeGetElementSuper* lir) {
  22506  Register object = ToRegister(lir->object());
  22507  Register index = ToRegister(lir->index());
  22508  ValueOperand receiver = ToValue(lir->receiver());
  22509 
  22510  pushArg(index);
  22511  pushArg(receiver);
  22512  pushArg(object);
  22513 
  22514  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
  22515                      MutableHandleValue);
  22516  callVM<Fn, js::NativeGetElement>(lir);
  22517 }
  22518 
void CodeGenerator::visitCallObjectHasSparseElement(
    LCallObjectHasSparseElement* lir) {
  // Call the pure C++ helper to test whether |object| has sparse element
  // |index|.  The helper writes its boolean answer into a Value slot we
  // reserve on the stack; a false return from the helper itself means the
  // pure lookup could not be completed, and we bail out.
  Register object = ToRegister(lir->object());
  Register index = ToRegister(lir->index());
  Register temp0 = ToRegister(lir->temp0());
  Register temp1 = ToRegister(lir->temp1());
  Register output = ToRegister(lir->output());

  // Reserve a stack Value slot for the result and pass its address (temp1).
  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(temp1);

  using Fn = bool (*)(JSContext*, NativeObject*, int32_t, Value*);
  masm.setupAlignedABICall();
  masm.loadJSContext(temp0);
  masm.passABIArg(temp0);
  masm.passABIArg(object);
  masm.passABIArg(index);
  masm.passABIArg(temp1);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.storeCallPointerResult(temp0);

  Label bail, ok;
  // Remember the tracked frame depth before either path pops the slot.
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(temp0, &ok);
  // Helper failed: pop the result slot, then bail out.
  masm.adjustStack(sizeof(Value));
  masm.jump(&bail);

  masm.bind(&ok);
  // Restore the bookkeeping changed by the failure path's adjustStack so the
  // success path's own pop balances correctly.
  masm.setFramePushed(framePushed);
  masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  bailoutFrom(&bail, lir->snapshot());
}
  22553 
  22554 void CodeGenerator::visitBigIntAsIntN(LBigIntAsIntN* ins) {
  22555  Register bits = ToRegister(ins->bits());
  22556  Register input = ToRegister(ins->input());
  22557 
  22558  pushArg(bits);
  22559  pushArg(input);
  22560 
  22561  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  22562  callVM<Fn, jit::BigIntAsIntN>(ins);
  22563 }
  22564 
  22565 void CodeGenerator::visitBigIntAsUintN(LBigIntAsUintN* ins) {
  22566  Register bits = ToRegister(ins->bits());
  22567  Register input = ToRegister(ins->input());
  22568 
  22569  pushArg(bits);
  22570  pushArg(input);
  22571 
  22572  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  22573  callVM<Fn, jit::BigIntAsUintN>(ins);
  22574 }
  22575 
  22576 void CodeGenerator::visitGuardNonGCThing(LGuardNonGCThing* ins) {
  22577  ValueOperand input = ToValue(ins->input());
  22578 
  22579  Label bail;
  22580  masm.branchTestGCThing(Assembler::Equal, input, &bail);
  22581  bailoutFrom(&bail, ins->snapshot());
  22582 }
  22583 
  22584 void CodeGenerator::visitToHashableNonGCThing(LToHashableNonGCThing* ins) {
  22585  ValueOperand input = ToValue(ins->input());
  22586  FloatRegister tempFloat = ToFloatRegister(ins->temp0());
  22587  ValueOperand output = ToOutValue(ins);
  22588 
  22589  masm.toHashableNonGCThing(input, output, tempFloat);
  22590 }
  22591 
void CodeGenerator::visitToHashableString(LToHashableString* ins) {
  // Convert a string to an atom so it can be used as a hash key.  Atoms pass
  // through unchanged; other strings go through a fast inline atomization,
  // falling back to an out-of-line AtomizeString VM call.
  Register input = ToRegister(ins->input());
  Register output = ToRegister(ins->output());

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  auto* ool = oolCallVM<Fn, js::AtomizeString>(ins, ArgList(input),
                                               StoreRegisterTo(output));

  // Already an atom?  Then |input| can be used directly.
  Label isAtom;
  masm.branchTest32(Assembler::NonZero,
                    Address(input, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &isAtom);

  // Try the inline fast path; on failure jump to the VM call.
  masm.tryFastAtomize(input, output, output, ool->entry());
  masm.jump(ool->rejoin());
  masm.bind(&isAtom);
  masm.movePtr(input, output);
  masm.bind(ool->rejoin());
}
  22611 
void CodeGenerator::visitToHashableValue(LToHashableValue* ins) {
  // Canonicalize an arbitrary value for hashing.  String inputs may need
  // atomization, which can require an out-of-line AtomizeString VM call.
  ValueOperand input = ToValue(ins->input());
  FloatRegister tempFloat = ToFloatRegister(ins->temp0());
  ValueOperand output = ToOutValue(ins);

  // The string to atomize is communicated to the OOL call through the
  // output's scratch register, and the resulting atom is stored back there.
  Register str = output.scratchReg();

  using Fn = JSAtom* (*)(JSContext*, JSString*);
  auto* ool =
      oolCallVM<Fn, js::AtomizeString>(ins, ArgList(str), StoreRegisterTo(str));

  masm.toHashableValue(input, output, tempFloat, ool->entry(), ool->rejoin());
}
  22625 
  22626 void CodeGenerator::visitHashNonGCThing(LHashNonGCThing* ins) {
  22627  ValueOperand input = ToValue(ins->input());
  22628  Register temp = ToRegister(ins->temp0());
  22629  Register output = ToRegister(ins->output());
  22630 
  22631  masm.prepareHashNonGCThing(input, output, temp);
  22632 }
  22633 
  22634 void CodeGenerator::visitHashString(LHashString* ins) {
  22635  Register input = ToRegister(ins->input());
  22636  Register temp = ToRegister(ins->temp0());
  22637  Register output = ToRegister(ins->output());
  22638 
  22639  masm.prepareHashString(input, output, temp);
  22640 }
  22641 
  22642 void CodeGenerator::visitHashSymbol(LHashSymbol* ins) {
  22643  Register input = ToRegister(ins->input());
  22644  Register output = ToRegister(ins->output());
  22645 
  22646  masm.prepareHashSymbol(input, output);
  22647 }
  22648 
  22649 void CodeGenerator::visitHashBigInt(LHashBigInt* ins) {
  22650  Register input = ToRegister(ins->input());
  22651  Register temp0 = ToRegister(ins->temp0());
  22652  Register temp1 = ToRegister(ins->temp1());
  22653  Register temp2 = ToRegister(ins->temp2());
  22654  Register output = ToRegister(ins->output());
  22655 
  22656  masm.prepareHashBigInt(input, output, temp0, temp1, temp2);
  22657 }
  22658 
  22659 void CodeGenerator::visitHashObject(LHashObject* ins) {
  22660  Register setObj = ToRegister(ins->setObject());
  22661  ValueOperand input = ToValue(ins->input());
  22662  Register temp0 = ToRegister(ins->temp0());
  22663  Register temp1 = ToRegister(ins->temp1());
  22664  Register temp2 = ToRegister(ins->temp2());
  22665  Register temp3 = ToRegister(ins->temp3());
  22666  Register output = ToRegister(ins->output());
  22667 
  22668  masm.prepareHashObject(setObj, input, output, temp0, temp1, temp2, temp3);
  22669 }
  22670 
  22671 void CodeGenerator::visitHashValue(LHashValue* ins) {
  22672  Register setObj = ToRegister(ins->setObject());
  22673  ValueOperand input = ToValue(ins->input());
  22674  Register temp0 = ToRegister(ins->temp0());
  22675  Register temp1 = ToRegister(ins->temp1());
  22676  Register temp2 = ToRegister(ins->temp2());
  22677  Register temp3 = ToRegister(ins->temp3());
  22678  Register output = ToRegister(ins->output());
  22679 
  22680  masm.prepareHashValue(setObj, input, output, temp0, temp1, temp2, temp3);
  22681 }
  22682 
  22683 void CodeGenerator::visitSetObjectHasNonBigInt(LSetObjectHasNonBigInt* ins) {
  22684  Register setObj = ToRegister(ins->setObject());
  22685  ValueOperand input = ToValue(ins->value());
  22686  Register hash = ToRegister(ins->hash());
  22687  Register temp0 = ToRegister(ins->temp0());
  22688  Register temp1 = ToRegister(ins->temp1());
  22689  Register output = ToRegister(ins->output());
  22690 
  22691  masm.setObjectHasNonBigInt(setObj, input, hash, output, temp0, temp1);
  22692 }
  22693 
  22694 void CodeGenerator::visitSetObjectHasBigInt(LSetObjectHasBigInt* ins) {
  22695  Register setObj = ToRegister(ins->setObject());
  22696  ValueOperand input = ToValue(ins->value());
  22697  Register hash = ToRegister(ins->hash());
  22698  Register temp0 = ToRegister(ins->temp0());
  22699  Register temp1 = ToRegister(ins->temp1());
  22700  Register temp2 = ToRegister(ins->temp2());
  22701  Register temp3 = ToRegister(ins->temp3());
  22702  Register output = ToRegister(ins->output());
  22703 
  22704  masm.setObjectHasBigInt(setObj, input, hash, output, temp0, temp1, temp2,
  22705                          temp3);
  22706 }
  22707 
  22708 void CodeGenerator::visitSetObjectHasValue(LSetObjectHasValue* ins) {
  22709  Register setObj = ToRegister(ins->setObject());
  22710  ValueOperand input = ToValue(ins->value());
  22711  Register hash = ToRegister(ins->hash());
  22712  Register temp0 = ToRegister(ins->temp0());
  22713  Register temp1 = ToRegister(ins->temp1());
  22714  Register temp2 = ToRegister(ins->temp2());
  22715  Register temp3 = ToRegister(ins->temp3());
  22716  Register output = ToRegister(ins->output());
  22717 
  22718  masm.setObjectHasValue(setObj, input, hash, output, temp0, temp1, temp2,
  22719                         temp3);
  22720 }
  22721 
  22722 void CodeGenerator::visitSetObjectHasValueVMCall(
  22723    LSetObjectHasValueVMCall* ins) {
  22724  pushArg(ToValue(ins->value()));
  22725  pushArg(ToRegister(ins->setObject()));
  22726 
  22727  using Fn = bool (*)(JSContext*, Handle<SetObject*>, HandleValue, bool*);
  22728  callVM<Fn, jit::SetObjectHas>(ins);
  22729 }
  22730 
  22731 void CodeGenerator::visitSetObjectDelete(LSetObjectDelete* ins) {
  22732  pushArg(ToValue(ins->key()));
  22733  pushArg(ToRegister(ins->setObject()));
  22734  using Fn = bool (*)(JSContext*, Handle<SetObject*>, HandleValue, bool*);
  22735  callVM<Fn, jit::SetObjectDelete>(ins);
  22736 }
  22737 
  22738 void CodeGenerator::visitSetObjectAdd(LSetObjectAdd* ins) {
  22739  pushArg(ToValue(ins->key()));
  22740  pushArg(ToRegister(ins->setObject()));
  22741  using Fn = bool (*)(JSContext*, Handle<SetObject*>, HandleValue);
  22742  callVM<Fn, jit::SetObjectAdd>(ins);
  22743 }
  22744 
  22745 void CodeGenerator::visitSetObjectSize(LSetObjectSize* ins) {
  22746  Register setObj = ToRegister(ins->setObject());
  22747  Register output = ToRegister(ins->output());
  22748 
  22749  masm.loadSetObjectSize(setObj, output);
  22750 }
  22751 
  22752 void CodeGenerator::visitMapObjectHasNonBigInt(LMapObjectHasNonBigInt* ins) {
  22753  Register mapObj = ToRegister(ins->mapObject());
  22754  ValueOperand input = ToValue(ins->value());
  22755  Register hash = ToRegister(ins->hash());
  22756  Register temp0 = ToRegister(ins->temp0());
  22757  Register temp1 = ToRegister(ins->temp1());
  22758  Register output = ToRegister(ins->output());
  22759 
  22760  masm.mapObjectHasNonBigInt(mapObj, input, hash, output, temp0, temp1);
  22761 }
  22762 
  22763 void CodeGenerator::visitMapObjectHasBigInt(LMapObjectHasBigInt* ins) {
  22764  Register mapObj = ToRegister(ins->mapObject());
  22765  ValueOperand input = ToValue(ins->value());
  22766  Register hash = ToRegister(ins->hash());
  22767  Register temp0 = ToRegister(ins->temp0());
  22768  Register temp1 = ToRegister(ins->temp1());
  22769  Register temp2 = ToRegister(ins->temp2());
  22770  Register temp3 = ToRegister(ins->temp3());
  22771  Register output = ToRegister(ins->output());
  22772 
  22773  masm.mapObjectHasBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
  22774                          temp3);
  22775 }
  22776 
  22777 void CodeGenerator::visitMapObjectHasValue(LMapObjectHasValue* ins) {
  22778  Register mapObj = ToRegister(ins->mapObject());
  22779  ValueOperand input = ToValue(ins->value());
  22780  Register hash = ToRegister(ins->hash());
  22781  Register temp0 = ToRegister(ins->temp0());
  22782  Register temp1 = ToRegister(ins->temp1());
  22783  Register temp2 = ToRegister(ins->temp2());
  22784  Register temp3 = ToRegister(ins->temp3());
  22785  Register output = ToRegister(ins->output());
  22786 
  22787  masm.mapObjectHasValue(mapObj, input, hash, output, temp0, temp1, temp2,
  22788                         temp3);
  22789 }
  22790 
  22791 void CodeGenerator::visitMapObjectHasValueVMCall(
  22792    LMapObjectHasValueVMCall* ins) {
  22793  pushArg(ToValue(ins->value()));
  22794  pushArg(ToRegister(ins->mapObject()));
  22795 
  22796  using Fn = bool (*)(JSContext*, Handle<MapObject*>, HandleValue, bool*);
  22797  callVM<Fn, jit::MapObjectHas>(ins);
  22798 }
  22799 
  22800 void CodeGenerator::visitMapObjectGetNonBigInt(LMapObjectGetNonBigInt* ins) {
  22801  Register mapObj = ToRegister(ins->mapObject());
  22802  ValueOperand input = ToValue(ins->value());
  22803  Register hash = ToRegister(ins->hash());
  22804  Register temp0 = ToRegister(ins->temp0());
  22805  Register temp1 = ToRegister(ins->temp1());
  22806  ValueOperand output = ToOutValue(ins);
  22807 
  22808  masm.mapObjectGetNonBigInt(mapObj, input, hash, output, temp0, temp1,
  22809                             output.scratchReg());
  22810 }
  22811 
  22812 void CodeGenerator::visitMapObjectGetBigInt(LMapObjectGetBigInt* ins) {
  22813  Register mapObj = ToRegister(ins->mapObject());
  22814  ValueOperand input = ToValue(ins->value());
  22815  Register hash = ToRegister(ins->hash());
  22816  Register temp0 = ToRegister(ins->temp0());
  22817  Register temp1 = ToRegister(ins->temp1());
  22818  Register temp2 = ToRegister(ins->temp2());
  22819  Register temp3 = ToRegister(ins->temp3());
  22820  ValueOperand output = ToOutValue(ins);
  22821 
  22822  masm.mapObjectGetBigInt(mapObj, input, hash, output, temp0, temp1, temp2,
  22823                          temp3, output.scratchReg());
  22824 }
  22825 
  22826 void CodeGenerator::visitMapObjectGetValue(LMapObjectGetValue* ins) {
  22827  Register mapObj = ToRegister(ins->mapObject());
  22828  ValueOperand input = ToValue(ins->value());
  22829  Register hash = ToRegister(ins->hash());
  22830  Register temp0 = ToRegister(ins->temp0());
  22831  Register temp1 = ToRegister(ins->temp1());
  22832  Register temp2 = ToRegister(ins->temp2());
  22833  Register temp3 = ToRegister(ins->temp3());
  22834  ValueOperand output = ToOutValue(ins);
  22835 
  22836  masm.mapObjectGetValue(mapObj, input, hash, output, temp0, temp1, temp2,
  22837                         temp3, output.scratchReg());
  22838 }
  22839 
  22840 void CodeGenerator::visitMapObjectGetValueVMCall(
  22841    LMapObjectGetValueVMCall* ins) {
  22842  pushArg(ToValue(ins->value()));
  22843  pushArg(ToRegister(ins->mapObject()));
  22844 
  22845  using Fn =
  22846      bool (*)(JSContext*, Handle<MapObject*>, HandleValue, MutableHandleValue);
  22847  callVM<Fn, jit::MapObjectGet>(ins);
  22848 }
  22849 
  22850 void CodeGenerator::visitMapObjectDelete(LMapObjectDelete* ins) {
  22851  pushArg(ToValue(ins->key()));
  22852  pushArg(ToRegister(ins->mapObject()));
  22853  using Fn = bool (*)(JSContext*, Handle<MapObject*>, HandleValue, bool*);
  22854  callVM<Fn, jit::MapObjectDelete>(ins);
  22855 }
  22856 
  22857 void CodeGenerator::visitMapObjectSet(LMapObjectSet* ins) {
  22858  pushArg(ToValue(ins->value()));
  22859  pushArg(ToValue(ins->key()));
  22860  pushArg(ToRegister(ins->mapObject()));
  22861  using Fn = bool (*)(JSContext*, Handle<MapObject*>, HandleValue, HandleValue);
  22862  callVM<Fn, jit::MapObjectSet>(ins);
  22863 }
  22864 
  22865 void CodeGenerator::visitMapObjectSize(LMapObjectSize* ins) {
  22866  Register mapObj = ToRegister(ins->mapObject());
  22867  Register output = ToRegister(ins->output());
  22868 
  22869  masm.loadMapObjectSize(mapObj, output);
  22870 }
  22871 
void CodeGenerator::emitWeakMapLookupObject(
    Register weakMap, Register obj, Register hashTable, Register hashCode,
    Register scratch, Register scratch2, Register scratch3, Register scratch4,
    Register scratch5, Label* found, Label* missing) {
  // Emit an inline lookup of the object key |obj| in |weakMap|'s hash table.
  // Jumps to |found| with a pointer to the matching entry in |scratch|, or
  // to |missing| when the map has no table yet or the key is absent.

  // Load hash map if it exists. If not, jump to missing.
  Address mapAddr(weakMap,
                  NativeObject::getFixedSlotOffset(WeakMapObject::DataSlot));
  masm.branchTestUndefined(Assembler::Equal, mapAddr, missing);
  masm.loadPrivate(mapAddr, hashTable);

  // Hash and scramble address of object.
#ifdef JS_PUNBOX64
  // On 64-bit platforms a boxed Value fits in a single register.
  ValueOperand boxedObj(scratch);
#else
  // On 32-bit platforms a Value is a (type, payload) register pair; |obj|
  // serves as the payload register.
  ValueOperand boxedObj(scratch, obj);
#endif
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, boxedObj);
  masm.hashAndScrambleValue(boxedObj, hashCode, scratch2);
  masm.prepareHashMFBT(hashCode, /*alreadyScrambled*/ true);

  using Entry = WeakMapObject::Map::Entry;
  auto matchEntry = [&]() {
    // Match callback for lookupMFBT: the candidate entry pointer is in
    // |scratch|.  Jump to |found| when the entry's key is exactly |obj|.
    Register entry = scratch;
    Label noMatch;
    masm.fallibleUnboxObject(Address(entry, Entry::offsetOfKey()), scratch2,
                             &noMatch);
    masm.branchPtr(Assembler::Equal, obj, scratch2, found);
    masm.bind(&noMatch);
  };
  masm.lookupMFBT<WeakMapObject::Map>(hashTable, hashCode, scratch, scratch2,
                                      scratch3, scratch4, scratch5, missing,
                                      matchEntry);
}
  22905 
void CodeGenerator::visitWeakMapGetObject(LWeakMapGetObject* ins) {
#ifndef JS_CODEGEN_X86
  // Inline WeakMap.get for an object key: look up the entry, load its value,
  // and run the read barrier on the result if needed.  Produces undefined
  // when the key is absent.
  Register weakMap = ToRegister(ins->weakMap());
  Register obj = ToRegister(ins->object());
  Register hashTable = ToRegister(ins->temp0());
  Register hashCode = ToRegister(ins->temp1());
  Register scratch = ToRegister(ins->temp2());
  Register scratch2 = ToRegister(ins->temp3());
  Register scratch3 = ToRegister(ins->temp4());
  Register scratch4 = ToRegister(ins->temp5());
  Register scratch5 = ToRegister(ins->temp6());
  ValueOperand output = ToOutValue(ins);

  Label found, missing;

  // On the |found| path the entry pointer is left in |scratch|.
  emitWeakMapLookupObject(weakMap, obj, hashTable, hashCode, scratch, scratch2,
                          scratch3, scratch4, scratch5, &found, &missing);

  masm.bind(&found);

  using Entry = WeakMapObject::Map::Entry;
  masm.loadValue(Address(scratch, Entry::offsetOfValue()), output);

  // Out-of-line slow path for the read barrier.
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    // Unboxed, tenured GC cell that needs to be barriered is in scratch.

    // Save the volatile registers, minus the lookup temps which no longer
    // hold anything that must survive the call.
    LiveRegisterSet regsToSave(RegisterSet::Volatile());
    regsToSave.takeUnchecked(hashTable);
    regsToSave.takeUnchecked(hashCode);
    regsToSave.takeUnchecked(scratch);
    regsToSave.takeUnchecked(scratch2);
    regsToSave.takeUnchecked(scratch3);
    regsToSave.takeUnchecked(scratch4);
    regsToSave.takeUnchecked(scratch5);
    masm.PushRegsInMask(regsToSave);

    using Fn = void (*)(js::gc::Cell* cell);
    masm.setupAlignedABICall();
    masm.passABIArg(scratch);
    masm.callWithABI<Fn, js::jit::ReadBarrier>();

    masm.PopRegsInMask(regsToSave);

    masm.jump(ool.rejoin());
  });
  addOutOfLineCode(ool, ins->mir());

  masm.emitValueReadBarrierFastPath(output, scratch, scratch2, scratch3,
                                    scratch4, scratch5, ool->entry());
  masm.jump(ool->rejoin());

  masm.bind(&missing);
  masm.moveValue(UndefinedValue(), output);

  masm.bind(ool->rejoin());
#else
  // x86 doesn't have enough registers, so we call into the VM.
  Register weakMap = ToRegister(ins->weakMap());
  Register obj = ToRegister(ins->object());
  Register temp = ToRegister(ins->temp0());
  ValueOperand output = ToOutValue(ins);

  // The result Value will be stored on the stack.
  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(temp);

  using Fn = void (*)(WeakMapObject*, JSObject*, Value*);
  masm.setupAlignedABICall();
  masm.passABIArg(weakMap);
  masm.passABIArg(obj);
  masm.passABIArg(temp);
  masm.callWithABI<Fn, js::WeakMapObject::getObject>();

  // Pop the result Value off the stack into the output.
  masm.Pop(output);
#endif
}
  22982 
void CodeGenerator::visitWeakMapHasObject(LWeakMapHasObject* ins) {
#ifndef JS_CODEGEN_X86
  // Inline WeakMap.has for an object key: run the inline lookup and
  // materialize true/false in |output|.
  Register weakMap = ToRegister(ins->weakMap());
  Register obj = ToRegister(ins->object());
  Register hashTable = ToRegister(ins->temp0());
  Register hashCode = ToRegister(ins->temp1());
  Register scratch = ToRegister(ins->temp2());
  Register scratch2 = ToRegister(ins->temp3());
  Register scratch3 = ToRegister(ins->temp4());
  Register scratch4 = ToRegister(ins->temp5());
  Register scratch5 = ToRegister(ins->temp6());
  Register output = ToRegister(ins->output());

  Label found, missing, done;

  emitWeakMapLookupObject(weakMap, obj, hashTable, hashCode, scratch, scratch2,
                          scratch3, scratch4, scratch5, &found, &missing);

  masm.bind(&found);
  masm.move32(Imm32(1), output);
  masm.jump(&done);

  masm.bind(&missing);
  masm.move32(Imm32(0), output);
  masm.bind(&done);
#else
  // x86 doesn't have enough registers, so we call into the VM.
  Register weakMap = ToRegister(ins->weakMap());
  Register obj = ToRegister(ins->object());
  Register output = ToRegister(ins->output());

  using Fn = bool (*)(WeakMapObject*, JSObject*);
  masm.setupAlignedABICall();
  masm.passABIArg(weakMap);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::WeakMapObject::hasObject>();
  masm.storeCallBoolResult(output);
#endif
}
  23022 
  23023 void CodeGenerator::visitWeakSetHasObject(LWeakSetHasObject* ins) {
  23024  Register weakSet = ToRegister(ins->weakSet());
  23025  Register obj = ToRegister(ins->object());
  23026  Register output = ToRegister(ins->output());
  23027 
  23028  using Fn = bool (*)(WeakSetObject*, JSObject*);
  23029  masm.setupAlignedABICall();
  23030  masm.passABIArg(weakSet);
  23031  masm.passABIArg(obj);
  23032  masm.callWithABI<Fn, js::WeakSetObject::hasObject>();
  23033  masm.storeCallBoolResult(output);
  23034 }
  23035 
  23036 void CodeGenerator::visitDateFillLocalTimeSlots(LDateFillLocalTimeSlots* ins) {
  23037  Register date = ToRegister(ins->date());
  23038  Register temp = ToRegister(ins->temp0());
  23039 
  23040  masm.dateFillLocalTimeSlots(date, temp, liveVolatileRegs(ins));
  23041 }
  23042 
  23043 void CodeGenerator::visitDateHoursFromSecondsIntoYear(
  23044    LDateHoursFromSecondsIntoYear* ins) {
  23045  auto secondsIntoYear = ToValue(ins->secondsIntoYear());
  23046  auto output = ToOutValue(ins);
  23047  Register temp0 = ToRegister(ins->temp0());
  23048  Register temp1 = ToRegister(ins->temp1());
  23049 
  23050  masm.dateHoursFromSecondsIntoYear(secondsIntoYear, output, temp0, temp1);
  23051 }
  23052 
  23053 void CodeGenerator::visitDateMinutesFromSecondsIntoYear(
  23054    LDateMinutesFromSecondsIntoYear* ins) {
  23055  auto secondsIntoYear = ToValue(ins->secondsIntoYear());
  23056  auto output = ToOutValue(ins);
  23057  Register temp0 = ToRegister(ins->temp0());
  23058  Register temp1 = ToRegister(ins->temp1());
  23059 
  23060  masm.dateMinutesFromSecondsIntoYear(secondsIntoYear, output, temp0, temp1);
  23061 }
  23062 
  23063 void CodeGenerator::visitDateSecondsFromSecondsIntoYear(
  23064    LDateSecondsFromSecondsIntoYear* ins) {
  23065  auto secondsIntoYear = ToValue(ins->secondsIntoYear());
  23066  auto output = ToOutValue(ins);
  23067  Register temp0 = ToRegister(ins->temp0());
  23068  Register temp1 = ToRegister(ins->temp1());
  23069 
  23070  masm.dateSecondsFromSecondsIntoYear(secondsIntoYear, output, temp0, temp1);
  23071 }
  23072 
  23073 void CodeGenerator::visitCanonicalizeNaND(LCanonicalizeNaND* ins) {
  23074  auto output = ToFloatRegister(ins->output());
  23075  MOZ_ASSERT(output == ToFloatRegister(ins->input()));
  23076 
  23077  masm.canonicalizeDouble(output);
  23078 }
  23079 
  23080 void CodeGenerator::visitCanonicalizeNaNF(LCanonicalizeNaNF* ins) {
  23081  auto output = ToFloatRegister(ins->output());
  23082  MOZ_ASSERT(output == ToFloatRegister(ins->input()));
  23083 
  23084  masm.canonicalizeFloat(output);
  23085 }
  23086 
  23087 template <size_t NumDefs>
  23088 void CodeGenerator::emitIonToWasmCallBase(LIonToWasmCallBase<NumDefs>* lir) {
  23089  wasm::JitCallStackArgVector stackArgs;
  23090  masm.propagateOOM(stackArgs.reserve(lir->numOperands()));
  23091  if (masm.oom()) {
  23092    return;
  23093  }
  23094 
  23095  MIonToWasmCall* mir = lir->mir();
  23096  const wasm::FuncExport& funcExport = mir->funcExport();
  23097  const wasm::FuncType& sig =
  23098      mir->instance()->code().codeMeta().getFuncType(funcExport.funcIndex());
  23099 
  23100  ABIArgGenerator abi(ABIKind::Wasm);
  23101  for (size_t i = 0; i < lir->numOperands(); i++) {
  23102    MIRType argMir;
  23103    switch (sig.args()[i].kind()) {
  23104      case wasm::ValType::I32:
  23105      case wasm::ValType::I64:
  23106      case wasm::ValType::F32:
  23107      case wasm::ValType::F64:
  23108        argMir = sig.args()[i].toMIRType();
  23109        break;
  23110      case wasm::ValType::V128:
  23111        MOZ_CRASH("unexpected argument type when calling from ion to wasm");
  23112      case wasm::ValType::Ref:
  23113        // temporarilyUnsupportedReftypeForEntry() restricts args to externref
  23114        MOZ_RELEASE_ASSERT(sig.args()[i].refType().isExtern());
  23115        // Argument is boxed on the JS side to an anyref, so passed as a
  23116        // pointer here.
  23117        argMir = sig.args()[i].toMIRType();
  23118        break;
  23119    }
  23120 
  23121    ABIArg arg = abi.next(argMir);
  23122    switch (arg.kind()) {
  23123      case ABIArg::GPR:
  23124      case ABIArg::FPU: {
  23125        MOZ_ASSERT(ToAnyRegister(lir->getOperand(i)) == arg.reg());
  23126        stackArgs.infallibleEmplaceBack(wasm::JitCallStackArg());
  23127        break;
  23128      }
  23129      case ABIArg::Stack: {
  23130        const LAllocation* larg = lir->getOperand(i);
  23131        if (larg->isConstant()) {
  23132          stackArgs.infallibleEmplaceBack(ToInt32(larg));
  23133        } else if (larg->isGeneralReg()) {
  23134          stackArgs.infallibleEmplaceBack(ToRegister(larg));
  23135        } else if (larg->isFloatReg()) {
  23136          stackArgs.infallibleEmplaceBack(ToFloatRegister(larg));
  23137        } else {
  23138          // Always use the stack pointer here because GenerateDirectCallFromJit
  23139          // depends on this.
  23140          Address addr = ToAddress<BaseRegForAddress::SP>(larg);
  23141          stackArgs.infallibleEmplaceBack(addr);
  23142        }
  23143        break;
  23144      }
  23145 #ifdef JS_CODEGEN_REGISTER_PAIR
  23146      case ABIArg::GPR_PAIR: {
  23147        MOZ_CRASH(
  23148            "no way to pass i64, and wasm uses hardfp for function calls");
  23149      }
  23150 #endif
  23151      case ABIArg::Uninitialized: {
  23152        MOZ_CRASH("Uninitialized ABIArg kind");
  23153      }
  23154    }
  23155  }
  23156 
  23157  const wasm::ValTypeVector& results = sig.results();
  23158  if (results.length() == 0) {
  23159    MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
  23160  } else {
  23161    MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
  23162    switch (results[0].kind()) {
  23163      case wasm::ValType::I32:
  23164        MOZ_ASSERT(lir->mir()->type() == MIRType::Int32);
  23165        MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
  23166        break;
  23167      case wasm::ValType::I64:
  23168        MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
  23169        MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
  23170        break;
  23171      case wasm::ValType::F32:
  23172        MOZ_ASSERT(lir->mir()->type() == MIRType::Float32);
  23173        MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnFloat32Reg);
  23174        break;
  23175      case wasm::ValType::F64:
  23176        MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
  23177        MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
  23178        break;
  23179      case wasm::ValType::V128:
  23180        MOZ_CRASH("unexpected return type when calling from ion to wasm");
  23181      case wasm::ValType::Ref:
  23182        // The wasm stubs layer unboxes anything that needs to be unboxed
  23183        // and leaves it in a Value.  A FuncRef/EqRef we could in principle
  23184        // leave it as a raw object pointer but for now it complicates the
  23185        // API to do so.
  23186        MOZ_ASSERT(lir->mir()->type() == MIRType::Value);
  23187        break;
  23188    }
  23189  }
  23190 
  23191  WasmInstanceObject* instObj = lir->mir()->instanceObject();
  23192 
  23193  Register scratch = ToRegister(lir->temp());
  23194 
  23195  uint32_t callOffset;
  23196  ensureOsiSpace();
  23197  GenerateDirectCallFromJit(masm, funcExport, instObj->instance(), stackArgs,
  23198                            scratch, &callOffset);
  23199 
  23200  // Add the instance object to the constant pool, so it is transferred to
  23201  // the owning IonScript and so that it gets traced as long as the IonScript
  23202  // lives.
  23203 
  23204  uint32_t unused;
  23205  masm.propagateOOM(graph.addConstantToPool(ObjectValue(*instObj), &unused));
  23206 
  23207  markSafepointAt(callOffset, lir);
  23208 }
  23209 
// Ion->wasm direct call whose result is neither a Value nor an Int64;
// all argument/result marshalling is shared in emitIonToWasmCallBase.
void CodeGenerator::visitIonToWasmCall(LIonToWasmCall* lir) {
  emitIonToWasmCallBase(lir);
}
// Ion->wasm direct call variant producing a boxed Value result; delegates
// to the shared emitter.
void CodeGenerator::visitIonToWasmCallV(LIonToWasmCallV* lir) {
  emitIonToWasmCallBase(lir);
}
// Ion->wasm direct call variant producing an Int64 result; delegates to the
// shared emitter.
void CodeGenerator::visitIonToWasmCallI64(LIonToWasmCallI64* lir) {
  emitIonToWasmCallBase(lir);
}
  23219 
  23220 void CodeGenerator::visitWasmNullConstant(LWasmNullConstant* lir) {
  23221  masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
  23222 }
  23223 
// Emit a full memory barrier. Only reachable when compiling wasm code.
void CodeGenerator::visitWasmFence(LWasmFence* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  masm.memoryBarrier(MemoryBarrier::Full());
}
  23228 
  23229 void CodeGenerator::visitWasmAnyRefFromJSValue(LWasmAnyRefFromJSValue* lir) {
  23230  ValueOperand input = ToValue(lir->def());
  23231  Register output = ToRegister(lir->output());
  23232  FloatRegister tempFloat = ToFloatRegister(lir->temp0());
  23233 
  23234  using Fn = JSObject* (*)(JSContext * cx, HandleValue value);
  23235  OutOfLineCode* oolBoxValue = oolCallVM<Fn, wasm::AnyRef::boxValue>(
  23236      lir, ArgList(input), StoreRegisterTo(output));
  23237  masm.convertValueToWasmAnyRef(input, output, tempFloat, oolBoxValue->entry());
  23238  masm.bind(oolBoxValue->rejoin());
  23239 }
  23240 
  23241 void CodeGenerator::visitWasmAnyRefFromJSObject(LWasmAnyRefFromJSObject* lir) {
  23242  Register input = ToRegister(lir->def());
  23243  Register output = ToRegister(lir->output());
  23244  masm.convertObjectToWasmAnyRef(input, output);
  23245 }
  23246 
  23247 void CodeGenerator::visitWasmAnyRefFromJSString(LWasmAnyRefFromJSString* lir) {
  23248  Register input = ToRegister(lir->def());
  23249  Register output = ToRegister(lir->output());
  23250  masm.convertStringToWasmAnyRef(input, output);
  23251 }
  23252 
  23253 void CodeGenerator::visitWasmAnyRefIsJSString(LWasmAnyRefIsJSString* lir) {
  23254  Register input = ToRegister(lir->input());
  23255  Register output = ToRegister(lir->output());
  23256  Register temp = ToRegister(lir->temp0());
  23257  Label fallthrough;
  23258  Label isJSString;
  23259  masm.branchWasmAnyRefIsJSString(true, input, temp, &isJSString);
  23260  masm.move32(Imm32(0), output);
  23261  masm.jump(&fallthrough);
  23262  masm.bind(&isJSString);
  23263  masm.move32(Imm32(1), output);
  23264  masm.bind(&fallthrough);
  23265 }
  23266 
  23267 void CodeGenerator::visitWasmTrapIfAnyRefIsNotJSString(
  23268    LWasmTrapIfAnyRefIsNotJSString* lir) {
  23269  Register input = ToRegister(lir->input());
  23270  Register temp = ToRegister(lir->temp0());
  23271  Label isJSString;
  23272  masm.branchWasmAnyRefIsJSString(true, input, temp, &isJSString);
  23273  masm.wasmTrap(lir->mir()->trap(), lir->mir()->trapSiteDesc());
  23274  masm.bind(&isJSString);
  23275 }
  23276 
  23277 void CodeGenerator::visitWasmAnyRefJSStringLength(
  23278    LWasmAnyRefJSStringLength* lir) {
  23279  Register input = ToRegister(lir->input());
  23280  Register output = ToRegister(lir->output());
  23281  Register temp = ToRegister(lir->temp0());
  23282  Label isJSString;
  23283  masm.branchWasmAnyRefIsJSString(true, input, temp, &isJSString);
  23284  masm.wasmTrap(lir->mir()->trap(), lir->mir()->trapSiteDesc());
  23285  masm.bind(&isJSString);
  23286  masm.untagWasmAnyRef(input, temp, wasm::AnyRefTag::String);
  23287  masm.loadStringLength(temp, output);
  23288 }
  23289 
  23290 void CodeGenerator::visitWasmNewI31Ref(LWasmNewI31Ref* lir) {
  23291  if (lir->value()->isConstant()) {
  23292    // i31ref are often created with constants. If that's the case we will
  23293    // do the operation statically here. This is similar to what is done
  23294    // in masm.truncate32ToWasmI31Ref.
  23295    Register output = ToRegister(lir->output());
  23296    uint32_t value =
  23297        static_cast<uint32_t>(lir->value()->toConstant()->toInt32());
  23298    uintptr_t ptr = wasm::AnyRef::fromUint32Truncate(value).rawValue();
  23299    masm.movePtr(ImmWord(ptr), output);
  23300  } else {
  23301    Register value = ToRegister(lir->value());
  23302    Register output = ToRegister(lir->output());
  23303    masm.truncate32ToWasmI31Ref(value, output);
  23304  }
  23305 }
  23306 
  23307 void CodeGenerator::visitWasmI31RefGet(LWasmI31RefGet* lir) {
  23308  Register value = ToRegister(lir->input());
  23309  Register output = ToRegister(lir->output());
  23310  if (lir->mir()->wideningOp() == wasm::FieldWideningOp::Signed) {
  23311    masm.convertWasmI31RefTo32Signed(value, output);
  23312  } else {
  23313    masm.convertWasmI31RefTo32Unsigned(value, output);
  23314  }
  23315 }
  23316 
  23317 #ifdef ENABLE_EXPLICIT_RESOURCE_MANAGEMENT
// Lowered to a VM call: record `resource` (together with its dispose
// `method`, the `needsClosure` flag, and the using-`hint`) on the
// environment's dispose capability.
void CodeGenerator::visitAddDisposableResource(LAddDisposableResource* lir) {
  Register environment = ToRegister(lir->environment());
  ValueOperand resource = ToValue(lir->resource());
  ValueOperand method = ToValue(lir->method());
  Register needsClosure = ToRegister(lir->needsClosure());
  uint8_t hint = lir->mir()->hint();

  // VM-call arguments are pushed in reverse order of the callee's signature.
  pushArg(Imm32(hint));
  pushArg(needsClosure);
  pushArg(method);
  pushArg(resource);
  pushArg(environment);

  using Fn = bool (*)(JSContext*, JS::Handle<JSObject*>, JS::Handle<JS::Value>,
                      JS::Handle<JS::Value>, bool, UsingHint);
  callVM<Fn, js::AddDisposableResourceToCapability>(lir);
}
  23335 
// Move the dispose capability out of a disposable environment: load the slot
// into the output Value, then clear the slot to undefined.
void CodeGenerator::visitTakeDisposeCapability(LTakeDisposeCapability* lir) {
  Register environment = ToRegister(lir->environment());
  ValueOperand output = ToOutValue(lir);

  Address capabilityAddr(
      environment, DisposableEnvironmentObject::offsetOfDisposeCapability());
  // GC pre-barrier on the old slot contents before it is overwritten below.
  emitPreBarrier(capabilityAddr);
  masm.loadValue(capabilityAddr, output);
  masm.storeValue(JS::UndefinedValue(), capabilityAddr);
}
  23346 
// Lowered to a VM call that builds a SuppressedError object from the pair of
// (error, suppressed) values.
void CodeGenerator::visitCreateSuppressedError(LCreateSuppressedError* lir) {
  ValueOperand error = ToValue(lir->error());
  ValueOperand suppressed = ToValue(lir->suppressed());

  // Arguments are pushed in reverse order of the callee's signature.
  pushArg(suppressed);
  pushArg(error);

  using Fn = ErrorObject* (*)(JSContext*, JS::Handle<JS::Value>,
                              JS::Handle<JS::Value>);
  callVM<Fn, js::CreateSuppressedError>(lir);
}
  23358 #endif
  23359 
  23360 #ifdef FUZZING_JS_FUZZILLI
  23361 void CodeGenerator::emitFuzzilliHashObject(LInstruction* lir, Register obj,
  23362                                           Register output) {
  23363  using Fn = void (*)(JSContext* cx, JSObject* obj, uint32_t* out);
  23364  OutOfLineCode* ool = oolCallVM<Fn, FuzzilliHashObjectInl>(
  23365      lir, ArgList(obj), StoreRegisterTo(output));
  23366 
  23367  masm.jump(ool->entry());
  23368  masm.bind(ool->rejoin());
  23369 }
  23370 
// Hash a BigInt for Fuzzilli via an ABI call to js::FuzzilliHashBigInt.
// Live volatile registers (except the output, which receives the result)
// are preserved around the call.
void CodeGenerator::emitFuzzilliHashBigInt(LInstruction* lir, Register bigInt,
                                           Register output) {
  LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
  volatileRegs.takeUnchecked(output);

  masm.PushRegsInMask(volatileRegs);

  using Fn = uint32_t (*)(BigInt* bigInt);
  // `output` doubles as the scratch register for ABI call setup.
  masm.setupUnalignedABICall(output);
  masm.passABIArg(bigInt);
  masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
  masm.storeCallInt32Result(output);

  masm.PopRegsInMask(volatileRegs);
}
  23386 
// Compute the Fuzzilli hash of a boxed Value. Numeric-ish cases (int32,
// double, null, undefined, boolean) are funneled into a common double hash;
// BigInt and Object use helper calls; String and Symbol hash to 0.
void CodeGenerator::visitFuzzilliHashV(LFuzzilliHashV* ins) {
  ValueOperand value = ToValue(ins->value());

  FloatRegister scratchFloat = ToFloatRegister(ins->temp1());
  Register scratch = ToRegister(ins->temp0());
  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

  Label hashDouble, done;

  Label isInt32, isDouble, isNull, isUndefined, isBoolean, isBigInt, isObject;
  {
    // Dispatch on the Value's type tag.
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestNull(Assembler::Equal, tag, &isNull);
    masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
    masm.branchTestBoolean(Assembler::Equal, tag, &isBoolean);
    masm.branchTestBigInt(Assembler::Equal, tag, &isBigInt);
    masm.branchTestObject(Assembler::Equal, tag, &isObject);

    // Symbol or String.
    masm.move32(Imm32(0), output);
    masm.jump(&done);
  }

  // int32: hash its double representation.
  masm.bind(&isInt32);
  {
    masm.unboxInt32(value, scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    masm.jump(&hashDouble);
  }

  masm.bind(&isDouble);
  {
    masm.unboxDouble(value, scratchFloat);
    masm.jump(&hashDouble);
  }

  // null hashes as the constant 1.0 (cf. visitFuzzilliHashT).
  masm.bind(&isNull);
  {
    masm.loadConstantDouble(1.0, scratchFloat);
    masm.jump(&hashDouble);
  }

  // undefined hashes as the constant 2.0.
  masm.bind(&isUndefined);
  {
    masm.loadConstantDouble(2.0, scratchFloat);
    masm.jump(&hashDouble);
  }

  // booleans hash as 3.0 (false) / 4.0 (true), distinct from null/undefined.
  masm.bind(&isBoolean);
  {
    masm.unboxBoolean(value, scratch);
    masm.add32(Imm32(3), scratch);
    masm.convertInt32ToDouble(scratch, scratchFloat);
    masm.jump(&hashDouble);
  }

  masm.bind(&isBigInt);
  {
    masm.unboxBigInt(value, scratch);
    emitFuzzilliHashBigInt(ins, scratch, output);
    masm.jump(&done);
  }

  masm.bind(&isObject);
  {
    masm.unboxObject(value, scratch);
    emitFuzzilliHashObject(ins, scratch, output);
    masm.jump(&done);
  }

  // Common tail: hash the double in scratchFloat into the output register.
  masm.bind(&hashDouble);
  masm.fuzzilliHashDouble(scratchFloat, output, scratch);

  masm.bind(&done);
}
  23467 
// Compute the Fuzzilli hash of an unboxed value whose type is statically
// known from the MIR operand. Mirrors the per-tag cases of
// visitFuzzilliHashV (same constants for undefined/null/boolean).
void CodeGenerator::visitFuzzilliHashT(LFuzzilliHashT* ins) {
  const LAllocation* value = ins->value();
  MIRType mirType = ins->mir()->getOperand(0)->type();

  // Temps may be invalid for types that do not need them (BigInt/Object).
  Register scratch = ToTempRegisterOrInvalid(ins->temp0());
  FloatRegister scratchFloat = ToTempFloatRegisterOrInvalid(ins->temp1());

  Register output = ToRegister(ins->output());
  MOZ_ASSERT(scratch != output);

  switch (mirType) {
    case MIRType::Undefined: {
      // undefined hashes as the constant 2.0.
      masm.loadConstantDouble(2.0, scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::Null: {
      // null hashes as the constant 1.0.
      masm.loadConstantDouble(1.0, scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::Int32: {
      // Hash the int32's double representation.
      masm.move32(ToRegister(value), scratch);
      masm.convertInt32ToDouble(scratch, scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::Double: {
      masm.moveDouble(ToFloatRegister(value), scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::Float32: {
      // Widen to double before hashing.
      masm.convertFloat32ToDouble(ToFloatRegister(value), scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::Boolean: {
      // booleans hash as 3.0 (false) / 4.0 (true).
      masm.add32(Imm32(3), ToRegister(value), scratch);
      masm.convertInt32ToDouble(scratch, scratchFloat);
      masm.fuzzilliHashDouble(scratchFloat, output, scratch);
      break;
    }

    case MIRType::BigInt: {
      emitFuzzilliHashBigInt(ins, ToRegister(value), output);
      break;
    }

    case MIRType::Object: {
      emitFuzzilliHashObject(ins, ToRegister(value), output);
      break;
    }

    default:
      MOZ_CRASH("unexpected type");
  }
}
  23531 
  23532 void CodeGenerator::visitFuzzilliHashStore(LFuzzilliHashStore* ins) {
  23533  Register value = ToRegister(ins->value());
  23534  Register temp0 = ToRegister(ins->temp0());
  23535  Register temp1 = ToRegister(ins->temp1());
  23536 
  23537  masm.fuzzilliStoreHash(value, temp0, temp1);
  23538 }
  23539 #endif
  23540 
// Compile-time guard: CodeGenerator must stay non-polymorphic (no vtable).
static_assert(!std::is_polymorphic_v<CodeGenerator>,
              "CodeGenerator should not have any virtual methods");
  23543 
  23544 }  // namespace jit
  23545 }  // namespace js