tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

BaselineCacheIRCompiler.cpp (140272B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/BaselineCacheIRCompiler.h"
      8 
      9 #include "mozilla/RandomNum.h"
     10 
     11 #include "gc/GC.h"
     12 #include "jit/CacheIR.h"
     13 #include "jit/CacheIRAOT.h"
     14 #include "jit/CacheIRSpewer.h"
     15 #include "jit/CacheIRWriter.h"
     16 #include "jit/JitFrames.h"
     17 #include "jit/JitRuntime.h"
     18 #include "jit/JitZone.h"
     19 #include "jit/Linker.h"
     20 #include "jit/MoveEmitter.h"
     21 #include "jit/RegExpStubConstants.h"
     22 #include "jit/SharedICHelpers.h"
     23 #include "jit/StubFolding.h"
     24 #include "jit/VMFunctions.h"
     25 #include "js/experimental/JitInfo.h"  // JSJitInfo
     26 #include "js/friend/DOMProxy.h"       // JS::ExpandoAndGeneration
     27 #include "proxy/DeadObjectProxy.h"
     28 #include "proxy/Proxy.h"
     29 #include "util/Unicode.h"
     30 #include "vm/PortableBaselineInterpret.h"
     31 #include "vm/StaticStrings.h"
     32 
     33 #include "jit/JitScript-inl.h"
     34 #include "jit/MacroAssembler-inl.h"
     35 #include "jit/SharedICHelpers-inl.h"
     36 #include "jit/VMFunctionList-inl.h"
     37 
     38 using namespace js;
     39 using namespace js::jit;
     40 
     41 using mozilla::Maybe;
     42 
     43 using JS::ExpandoAndGeneration;
     44 
     45 namespace js {
     46 namespace jit {
     47 
     48 static uint32_t GetICStackValueOffset() {
     49  uint32_t offset = ICStackValueOffset;
     50  if (JitOptions.enableICFramePointers) {
     51 #ifdef JS_USE_LINK_REGISTER
     52    // The frame pointer and return address are also on the stack.
     53    offset += 2 * sizeof(uintptr_t);
     54 #else
     55    // The frame pointer is also on the stack.
     56    offset += sizeof(uintptr_t);
     57 #endif
     58  }
     59  return offset;
     60 }
     61 
// Pushes the IC frame registers (return address on link-register targets,
// then the frame pointer). Must mirror PopICFrameRegs exactly; only used
// when IC frame pointers are enabled.
static void PushICFrameRegs(MacroAssembler& masm) {
  MOZ_ASSERT(JitOptions.enableICFramePointers);
#ifdef JS_USE_LINK_REGISTER
  masm.pushReturnAddress();
#endif
  masm.push(FramePointer);
}
     69 
// Pops the IC frame registers in the reverse order of PushICFrameRegs
// (frame pointer first, then the return address on link-register targets).
static void PopICFrameRegs(MacroAssembler& masm) {
  MOZ_ASSERT(JitOptions.enableICFramePointers);
  masm.pop(FramePointer);
#ifdef JS_USE_LINK_REGISTER
  masm.popReturnAddress();
#endif
}
     77 
     78 Address CacheRegisterAllocator::addressOf(MacroAssembler& masm,
     79                                          BaselineFrameSlot slot) const {
     80  uint32_t offset =
     81      stackPushed_ + GetICStackValueOffset() + slot.slot() * sizeof(JS::Value);
     82  return Address(masm.getStackPointer(), offset);
     83 }
     84 BaseValueIndex CacheRegisterAllocator::addressOf(MacroAssembler& masm,
     85                                                 Register argcReg,
     86                                                 BaselineFrameSlot slot) const {
     87  uint32_t offset =
     88      stackPushed_ + GetICStackValueOffset() + slot.slot() * sizeof(JS::Value);
     89  return BaseValueIndex(masm.getStackPointer(), argcReg, offset);
     90 }
     91 
// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
// Stub fields are accessed by address (StubFieldPolicy::Address), i.e. read
// out of the stub data at runtime rather than baked into the code.
BaselineCacheIRCompiler::BaselineCacheIRCompiler(JSContext* cx,
                                                 TempAllocator& alloc,
                                                 const CacheIRWriter& writer,
                                                 uint32_t stubDataOffset)
    : CacheIRCompiler(cx, alloc, writer, stubDataOffset, Mode::Baseline,
                      StubFieldPolicy::Address),
      // Flipped to true the first time a stub frame is entered (see
      // AutoStubFrame::enter).
      makesGCCalls_(false) {}
    100 
// AutoStubFrame methods
// RAII helper for emitting enter/leave of a baseline stub frame. The debug
// member records masm's frame depth at enter() so leave() can restore it.
AutoStubFrame::AutoStubFrame(BaselineCacheIRCompiler& compiler)
    : compiler(compiler)
#ifdef DEBUG
      ,
      framePushedAtEnterStubFrame_(0)
#endif
{
}
// Emits code to enter a baseline stub frame. Requires the IC's operand
// stack to be empty (nothing pushed by the allocator).
void AutoStubFrame::enter(MacroAssembler& masm, Register scratch) {
  MOZ_ASSERT(compiler.allocator.stackPushed() == 0);

  if (JitOptions.enableICFramePointers) {
    // If we have already pushed the frame pointer, pop it
    // before creating the stub frame.
    PopICFrameRegs(masm);
  }
  EmitBaselineEnterStubFrame(masm, scratch);

#ifdef DEBUG
  // Remember the depth so leave() can restore masm's bookkeeping.
  framePushedAtEnterStubFrame_ = masm.framePushed();
#endif

  MOZ_ASSERT(!compiler.enteredStubFrame_);
  compiler.enteredStubFrame_ = true;

  // All current uses of this are to call VM functions that can GC.
  compiler.makesGCCalls_ = true;
}
// Emits code to tear down the stub frame created by enter().
void AutoStubFrame::leave(MacroAssembler& masm) {
  MOZ_ASSERT(compiler.enteredStubFrame_);
  compiler.enteredStubFrame_ = false;

#ifdef DEBUG
  // Discard bookkeeping for anything pushed inside the stub frame
  // (e.g. locally traced values).
  masm.setFramePushed(framePushedAtEnterStubFrame_);
#endif

  EmitBaselineLeaveStubFrame(masm);
  if (JitOptions.enableICFramePointers) {
    // We will pop the frame pointer when we return,
    // so we have to push it again now.
    PushICFrameRegs(masm);
  }
}
    145 
// Pushes the inlined ICScript pointer for trial-inlined calls. Must be the
// first push after entering the stub frame (no traced values yet), because
// the callee's baseline prologue locates it at a fixed offset.
void AutoStubFrame::pushInlinedICScript(MacroAssembler& masm,
                                        Address icScriptAddr) {
  // The baseline prologue expects the inlined ICScript to
  // be at a fixed offset from the stub frame pointer.
  MOZ_ASSERT(compiler.localTracingSlots_ == 0);
  masm.Push(icScriptAddr);

#ifndef JS_64BIT
  // We expect the stack to be Value-aligned when we start pushing arguments.
  // We are already aligned when we enter the stub frame. Pushing a 32-bit
  // ICScript requires an additional adjustment to maintain alignment.
  static_assert(sizeof(Value) == 2 * sizeof(uintptr_t));
  masm.subFromStackPtr(Imm32(sizeof(uintptr_t)));
#endif
}
    161 
// Pushes |value| inside the stub frame so the GC can trace it, and records
// one more local tracing slot. Slots must be stored contiguously right
// after entering the stub frame (enforced by the framePushed assertion).
void AutoStubFrame::storeTracedValue(MacroAssembler& masm, ValueOperand value) {
  // Slot count is stored in a uint8_t on the JitCode (see compile()).
  MOZ_ASSERT(compiler.localTracingSlots_ < 255);
  MOZ_ASSERT(masm.framePushed() - framePushedAtEnterStubFrame_ ==
             compiler.localTracingSlots_ * sizeof(Value));
  masm.Push(value);
  compiler.localTracingSlots_++;
}
    169 
// Reloads a value previously saved with storeTracedValue from its
// frame-pointer-relative slot.
void AutoStubFrame::loadTracedValue(MacroAssembler& masm, uint8_t slotIndex,
                                    ValueOperand value) {
  // NOTE(review): valid indices are presumably 0..localTracingSlots_-1, so
  // `<` looks like the tighter bound — confirm whether `<=` is intentional.
  MOZ_ASSERT(slotIndex <= compiler.localTracingSlots_);
  int32_t offset = BaselineStubFrameLayout::LocallyTracedValueOffset +
                   slotIndex * sizeof(Value);
  // Traced slots live below the frame pointer, hence the negative offset.
  masm.loadValue(Address(FramePointer, -offset), value);
}
    177 
#ifdef DEBUG
// Debug-only: verify every enter() was matched by a leave() before this
// helper goes out of scope.
AutoStubFrame::~AutoStubFrame() { MOZ_ASSERT(!compiler.enteredStubFrame_); }
#endif
    181 
    182 }  // namespace jit
    183 }  // namespace js
    184 
    185 bool BaselineCacheIRCompiler::makesGCCalls() const { return makesGCCalls_; }
    186 
// Returns the address of a stub data field, relative to ICStubReg (the
// register holding the current ICCacheIRStub).
Address BaselineCacheIRCompiler::stubAddress(uint32_t offset) const {
  return Address(ICStubReg, stubDataOffset_ + offset);
}
    190 
// Emits a call to the VM function |fn|, resolved at compile time to its
// VMFunctionId via the VMFunctionToId trait.
template <typename Fn, Fn fn>
void BaselineCacheIRCompiler::callVM(MacroAssembler& masm) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(masm, id);
}
    196 
// Compiles the CacheIR stream into a JitCode stub: emits the prologue,
// translates each CacheIR op via the generated emit* methods, then appends
// the failure paths. Returns nullptr on OOM or emit failure.
JitCode* BaselineCacheIRCompiler::compile() {
  AutoCreatedBy acb(masm, "BaselineCacheIRCompiler::compile");

#ifndef JS_USE_LINK_REGISTER
  // Keep masm's frame-size bookkeeping in sync: without a link register the
  // return address occupies a stack slot.
  masm.adjustFrame(sizeof(intptr_t));
#endif
#ifdef JS_CODEGEN_ARM
  AutoNonDefaultSecondScratchRegister andssr(masm, BaselineSecondScratchReg);
#endif
  if (JitOptions.enableICFramePointers) {
    /* [SMDOC] Baseline IC Frame Pointers
     *
     *  In general, ICs don't have frame pointers until just before
     *  doing a VM call, at which point we retroactively create a stub
     *  frame. However, for the sake of external profilers, we
     *  optionally support full-IC frame pointers in baseline ICs, with
     *  the following approach:
     *    1. We push a frame pointer when we enter an IC.
     *    2. We pop the frame pointer when we return from an IC, or
     *       when we jump to the next IC.
     *    3. Entering a stub frame for a VM call already pushes a
     *       frame pointer, so we pop our existing frame pointer
     *       just before entering a stub frame and push it again
     *       just after leaving a stub frame.
     *  Some ops take advantage of the fact that the frame pointer is
     *  not updated until we enter a stub frame to read values from
     *  the caller's frame. To support this, we allocate a separate
     *  baselineFrame register when IC frame pointers are enabled.
     */
    PushICFrameRegs(masm);
    masm.moveStackPtrTo(FramePointer);

    // The saved caller frame pointer is at [FramePointer + 0].
    MOZ_ASSERT(baselineFrameReg() != FramePointer);
    masm.loadPtr(Address(FramePointer, 0), baselineFrameReg());
  }

  // Count stub entries: We count entries rather than successes as it much
  // easier to ensure ICStubReg is valid at entry than at exit.
  Address enteredCount(ICStubReg, ICCacheIRStub::offsetOfEnteredCount());
  masm.add32(Imm32(1), enteredCount);

  perfSpewer_.startRecording();

  // Main translation loop: one emit* call per CacheIR op.
  CacheIRReader reader(writer_);
  do {
    CacheOp op = reader.readOp();
    perfSpewer_.recordInstruction(masm, op);
    switch (op) {
#define DEFINE_OP(op, ...)                 \
  case CacheOp::op:                        \
    if (!emit##op(reader)) return nullptr; \
    break;
      CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

      default:
        MOZ_CRASH("Invalid op");
    }
    allocator.nextOp();
  } while (reader.more());

  // A well-formed CacheIR stream always ends in a return op, so falling off
  // the end of the loop is unreachable at runtime.
  MOZ_ASSERT(!enteredStubFrame_);
  masm.assumeUnreachable("Should have returned from IC");

  // Done emitting the main IC code. Now emit the failure paths.
  perfSpewer_.recordOffset(masm, "FailurePath");

  for (size_t i = 0; i < failurePaths.length(); i++) {
    if (!emitFailurePath(i)) {
      return nullptr;
    }
    if (JitOptions.enableICFramePointers) {
      // Balance the prologue push before jumping to the next stub.
      PopICFrameRegs(masm);
    }
    EmitStubGuardFailure(masm);
  }

  perfSpewer_.endRecording();

  Linker linker(masm);
  JitCode* newStubCode = linker.newCode(cx_, CodeKind::Baseline);
  if (!newStubCode) {
    cx_->recoverFromOutOfMemory();
    return nullptr;
  }

  // Record how many GC-traced value slots stubs using this code push
  // (see AutoStubFrame::storeTracedValue).
  newStubCode->setLocalTracingSlots(localTracingSlots_);

  return newStubCode;
}
    287 
// Guards that the object's shape equals the shape stored in stub data at
// |shapeOffset|; jumps to the failure path otherwise.
bool BaselineCacheIRCompiler::emitGuardShape(ObjOperandId objId,
                                             uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch1(allocator, masm);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  // The Spectre-hardened shape check needs a second scratch register; only
  // allocate it when required.
  Maybe<AutoScratchRegister> maybeScratch2;
  if (needSpectreMitigations) {
    maybeScratch2.emplace(allocator, masm);
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address addr(stubAddress(shapeOffset));
  masm.loadPtr(addr, scratch1);
  if (needSpectreMitigations) {
    masm.branchTestObjShape(Assembler::NotEqual, obj, scratch1, *maybeScratch2,
                            obj, failure->label());
  } else {
    masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj,
                                                scratch1, failure->label());
  }

  return true;
}
    318 
    319 bool BaselineCacheIRCompiler::emitGuardProto(ObjOperandId objId,
    320                                             uint32_t protoOffset) {
    321  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
    322  Register obj = allocator.useRegister(masm, objId);
    323  AutoScratchRegister scratch(allocator, masm);
    324 
    325  FailurePath* failure;
    326  if (!addFailurePath(&failure)) {
    327    return false;
    328  }
    329 
    330  Address addr(stubAddress(protoOffset));
    331  masm.loadObjProto(obj, scratch);
    332  masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
    333  return true;
    334 }
    335 
// Guards that the object belongs to the compartment stored in stub data,
// after first checking that the compartment's global wrapper has not been
// nuked (turned into a dead proxy).
bool BaselineCacheIRCompiler::emitGuardCompartment(ObjOperandId objId,
                                                   uint32_t globalOffset,
                                                   uint32_t compartmentOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Verify that the global wrapper is still valid, as
  // it is pre-requisite for doing the compartment check.
  Address globalWrapper(stubAddress(globalOffset));
  masm.loadPtr(globalWrapper, scratch);
  Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
  // A DeadObjectProxy handler means the wrapper was nuked: fail.
  masm.branchPtr(Assembler::Equal, handlerAddr,
                 ImmPtr(&DeadObjectProxy::singleton), failure->label());

  Address addr(stubAddress(compartmentOffset));
  masm.branchTestObjCompartment(Assembler::NotEqual, obj, addr, scratch,
                                failure->label());
  return true;
}
    361 
// Guards that the object's JSClass equals the class pointer stored in stub
// data at |claspOffset|; jumps to the failure path otherwise.
bool BaselineCacheIRCompiler::emitGuardAnyClass(ObjOperandId objId,
                                                uint32_t claspOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address testAddr(stubAddress(claspOffset));
  // Use the Spectre-hardened class check only when this operand needs it.
  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, testAddr, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(
        Assembler::NotEqual, obj, testAddr, scratch, failure->label());
  }

  return true;
}
    384 
    385 bool BaselineCacheIRCompiler::emitGuardHasProxyHandler(ObjOperandId objId,
    386                                                       uint32_t handlerOffset) {
    387  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
    388  Register obj = allocator.useRegister(masm, objId);
    389  AutoScratchRegister scratch(allocator, masm);
    390 
    391  FailurePath* failure;
    392  if (!addFailurePath(&failure)) {
    393    return false;
    394  }
    395 
    396  Address testAddr(stubAddress(handlerOffset));
    397  masm.loadPtr(testAddr, scratch);
    398 
    399  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
    400  masm.branchPtr(Assembler::NotEqual, handlerAddr, scratch, failure->label());
    401  return true;
    402 }
    403 
    404 bool BaselineCacheIRCompiler::emitGuardSpecificObject(ObjOperandId objId,
    405                                                      uint32_t expectedOffset) {
    406  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
    407  Register obj = allocator.useRegister(masm, objId);
    408 
    409  FailurePath* failure;
    410  if (!addFailurePath(&failure)) {
    411    return false;
    412  }
    413 
    414  Address addr(stubAddress(expectedOffset));
    415  masm.branchPtr(Assembler::NotEqual, addr, obj, failure->label());
    416  return true;
    417 }
    418 
// Guards on function identity. |nargsAndFlagsOffset| is unused in this
// baseline path; the guard degenerates to a specific-object check.
bool BaselineCacheIRCompiler::emitGuardSpecificFunction(
    ObjOperandId objId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
  return emitGuardSpecificObject(objId, expectedOffset);
}
    423 
// Guards that the function's script pointer (from its jitinfo-or-script
// private slot) equals the script stored in stub data.
// |nargsAndFlagsOffset| is unused in this baseline path.
bool BaselineCacheIRCompiler::emitGuardFunctionScript(
    ObjOperandId funId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address addr(stubAddress(expectedOffset));
  masm.loadPrivate(Address(fun, JSFunction::offsetOfJitInfoOrScript()),
                   scratch);
  masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
  return true;
}
    442 
// Guards that the input string equals the atom stored in stub data.
// Fast paths: pointer identity, then atom-cache lookup; slow path compares
// characters via EqualStringsHelperPure after a cheap length check.
bool BaselineCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
                                                    uint32_t expectedOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register str = allocator.useRegister(masm, strId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address atomAddr(stubAddress(expectedOffset));

  Label done, notCachedAtom;
  // Fast path: same pointer means same atom.
  masm.branchPtr(Assembler::Equal, atomAddr, str, &done);

  // The pointers are not equal, so if the input string is also an atom it
  // must be a different string.
  masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), failure->label());

  // Try the atom cache: if the string's atom is cached, compare it directly.
  masm.tryFastAtomize(str, scratch, scratch, &notCachedAtom);
  masm.branchPtr(Assembler::Equal, atomAddr, scratch, &done);
  masm.jump(failure->label());
  masm.bind(&notCachedAtom);

  // Check the length.
  masm.loadPtr(atomAddr, scratch);
  masm.loadStringLength(scratch, scratch);
  masm.branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
                scratch, failure->label());

  // We have a non-atomized string with the same length. Call a helper
  // function to do the comparison.
  LiveRegisterSet volatileRegs = liveVolatileRegs();
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSString* str1, JSString* str2);
  masm.setupUnalignedABICall(scratch);
  masm.loadPtr(atomAddr, scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(str);
  masm.callWithABI<Fn, EqualStringsHelperPure>();
  masm.storeCallPointerResult(scratch);

  // Restore volatile registers, but keep the boolean result in |scratch|.
  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  masm.branchIfFalseBool(scratch, failure->label());

  masm.bind(&done);
  return true;
}
    496 
    497 bool BaselineCacheIRCompiler::emitGuardSpecificSymbol(SymbolOperandId symId,
    498                                                      uint32_t expectedOffset) {
    499  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
    500  Register sym = allocator.useRegister(masm, symId);
    501 
    502  FailurePath* failure;
    503  if (!addFailurePath(&failure)) {
    504    return false;
    505  }
    506 
    507  Address addr(stubAddress(expectedOffset));
    508  masm.branchPtr(Assembler::NotEqual, addr, sym, failure->label());
    509  return true;
    510 }
    511 
    512 bool BaselineCacheIRCompiler::emitGuardSpecificValue(ValOperandId valId,
    513                                                     uint32_t expectedOffset) {
    514  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
    515  ValueOperand val = allocator.useValueRegister(masm, valId);
    516 
    517  FailurePath* failure;
    518  if (!addFailurePath(&failure)) {
    519    return false;
    520  }
    521 
    522  Address addr(stubAddress(expectedOffset));
    523  masm.branchTestValue(Assembler::NotEqual, addr, val, failure->label());
    524  return true;
    525 }
    526 
// Loads a constant Value from stub data into the IC's output register.
bool BaselineCacheIRCompiler::emitLoadValueResult(uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  masm.loadValue(stubAddress(valOffset), output.valueReg());
  return true;
}
    533 
// Loads a weakly-held Value from stub data into the output register. No
// additional checks or barriers are emitted here (hence "unchecked").
bool BaselineCacheIRCompiler::emitUncheckedLoadWeakValueResult(
    uint32_t valOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  masm.loadValue(stubAddress(valOffset), output.valueReg());
  return true;
}
    541 
// Loads a weakly-held object pointer from stub data and boxes it as an
// object Value in the output register. No checks are emitted ("unchecked").
bool BaselineCacheIRCompiler::emitUncheckedLoadWeakObjectResult(
    uint32_t objOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadPtr(stubAddress(objOffset), scratch);
  masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
  return true;
}
    552 
// Loads a Value from one of the object's fixed slots. The slot's byte
// offset is itself read out of stub data (at |offsetOffset|), so the same
// code handles any fixed-slot index.
bool BaselineCacheIRCompiler::emitLoadFixedSlotResult(ObjOperandId objId,
                                                      uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.load32(stubAddress(offsetOffset), scratch);
  // |scratch| is already a byte offset, hence TimesOne scaling.
  masm.loadValue(BaseIndex(obj, scratch, TimesOne), output.valueReg());
  return true;
}
    564 
// Typed variant of emitLoadFixedSlotResult; the ValueType parameter is
// ignored here.
bool BaselineCacheIRCompiler::emitLoadFixedSlotTypedResult(
    ObjOperandId objId, uint32_t offsetOffset, ValueType) {
  // The type is only used by Warp.
  return emitLoadFixedSlotResult(objId, offsetOffset);
}
    570 
// Loads a Value from the object's dynamic (out-of-line) slots: first loads
// the slots pointer, then indexes it by the byte offset read from stub data.
bool BaselineCacheIRCompiler::emitLoadDynamicSlotResult(ObjOperandId objId,
                                                        uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  masm.load32(stubAddress(offsetOffset), scratch);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
  // |scratch| is a byte offset into the slots array, hence TimesOne.
  masm.loadValue(BaseIndex(scratch2, scratch, TimesOne), output.valueReg());
  return true;
}
    584 
// Shared tail for scripted-getter calls (plain and trial-inlined): loads
// the getter's jitcode, enters a stub frame, pads the argument area if the
// callee expects formals (argument underflow), aligns the stack, then does
// a JIT-to-JIT call with |receiver| as thisv and argc == 0.
bool BaselineCacheIRCompiler::emitCallScriptedGetterShared(
    ValOperandId receiverId, ObjOperandId calleeId, bool sameRealm,
    uint32_t nargsAndFlagsOffset, Maybe<uint32_t> icScriptOffset) {
  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
  Register callee = allocator.useRegister(masm, calleeId);

  AutoScratchRegister code(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  bool isInlined = icScriptOffset.isSome();

  // First, retrieve raw jitcode for getter.
  if (isInlined) {
    // Inlined calls must not enter Ion code.
    masm.loadJitCodeRawNoIon(callee, code, scratch);
  } else {
    masm.loadJitCodeRaw(callee, code);
  }

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  if (!sameRealm) {
    masm.switchToObjectRealm(callee, scratch);
  }

  if (isInlined) {
    stubFrame.pushInlinedICScript(masm, stubAddress(*icScriptOffset));
  }

  // If the callee has formal parameters, push |undefined| for each of them
  // (we pass zero actual arguments).
  Label noUnderflow, doneAlignment;
  masm.loadFunctionArgCount(callee, scratch);
  masm.branch32(Assembler::Equal, scratch, Imm32(0), &noUnderflow);

  masm.alignJitStackBasedOnNArgs(scratch, /*countIncludesThis*/ false);

  Label loop;
  masm.bind(&loop);
  masm.Push(UndefinedValue());
  masm.sub32(Imm32(1), scratch);
  masm.branch32(Assembler::Above, scratch, Imm32(0), &loop);
  masm.jump(&doneAlignment);

  // Align the stack such that the JitFrameLayout is aligned on
  // JitStackAlignment.
  masm.bind(&noUnderflow);
  masm.alignJitStackBasedOnNArgs(0, /*countIncludesThis = */ false);
  masm.bind(&doneAlignment);

  // Getter is called with 0 arguments, just |receiver| as thisv.
  // Note that we use Push, not push, so that callJit will align the stack
  // properly on ARM.
  masm.Push(receiver);

  masm.Push(callee);
  masm.Push(
      FrameDescriptor(FrameType::BaselineStub, /* argc = */ 0, isInlined));

  masm.callJit(code);

  stubFrame.leave(masm);

  if (!sameRealm) {
    masm.switchToBaselineFrameRealm(R1.scratchReg());
  }

  return true;
}
    654 
    655 bool BaselineCacheIRCompiler::emitCallScriptedGetterResult(
    656    ValOperandId receiverId, ObjOperandId calleeId, bool sameRealm,
    657    uint32_t nargsAndFlagsOffset) {
    658  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
    659  Maybe<uint32_t> icScriptOffset = mozilla::Nothing();
    660  return emitCallScriptedGetterShared(receiverId, calleeId, sameRealm,
    661                                      nargsAndFlagsOffset, icScriptOffset);
    662 }
    663 
    664 bool BaselineCacheIRCompiler::emitCallInlinedGetterResult(
    665    ValOperandId receiverId, ObjOperandId calleeId, uint32_t icScriptOffset,
    666    bool sameRealm, uint32_t nargsAndFlagsOffset) {
    667  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
    668  return emitCallScriptedGetterShared(receiverId, calleeId, sameRealm,
    669                                      nargsAndFlagsOffset,
    670                                      mozilla::Some(icScriptOffset));
    671 }
    672 
// Calls a native (C++) getter through the CallNativeGetter VM function.
// Arguments are pushed in reverse order of the VM function's signature.
bool BaselineCacheIRCompiler::emitCallNativeGetterResult(
    ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
    uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
  Address getterAddr(stubAddress(getterOffset));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the callee in the scratch register.
  masm.loadPtr(getterAddr, scratch);

  masm.Push(receiver);
  masm.Push(scratch);

  using Fn =
      bool (*)(JSContext*, HandleFunction, HandleValue, MutableHandleValue);
  callVM<Fn, CallNativeGetter>(masm);

  stubFrame.leave(masm);
  return true;
}
    701 
// Calls a DOM getter through the CallDOMGetter VM function, passing the
// JSJitInfo loaded from stub data. Arguments are pushed in reverse order
// of the VM function's signature.
bool BaselineCacheIRCompiler::emitCallDOMGetterResult(ObjOperandId objId,
                                                      uint32_t jitInfoOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Address jitInfoAddr(stubAddress(jitInfoOffset));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the JSJitInfo in the scratch register.
  masm.loadPtr(jitInfoAddr, scratch);

  masm.Push(obj);
  masm.Push(scratch);

  using Fn =
      bool (*)(JSContext*, const JSJitInfo*, HandleObject, MutableHandleValue);
  callVM<Fn, CallDOMGetter>(masm);

  stubFrame.leave(masm);
  return true;
}
    729 
// Performs a proxy [[Get]] through the ProxyGetProperty VM function, with
// the jsid loaded from stub data. Arguments are pushed in reverse order of
// the VM function's signature.
bool BaselineCacheIRCompiler::emitProxyGetResult(ObjOperandId objId,
                                                 uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Address idAddr(stubAddress(idOffset));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the jsid in the scratch register.
  masm.loadPtr(idAddr, scratch);

  masm.Push(scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
  callVM<Fn, ProxyGetProperty>(masm);

  stubFrame.leave(masm);
  return true;
}
    755 
// Produces a boolean Value indicating whether the current baseline frame is
// a constructing call, by inspecting the CalleeToken's low tag bit.
bool BaselineCacheIRCompiler::emitFrameIsConstructingResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register outputScratch = output.valueReg().scratchReg();

  // Load the CalleeToken.
  Address tokenAddr(baselineFrameReg(), JitFrameLayout::offsetOfCalleeToken());
  masm.loadPtr(tokenAddr, outputScratch);

  // The low bit indicates whether this call is constructing, just clear the
  // other bits.
  static_assert(CalleeToken_Function == 0x0);
  static_assert(CalleeToken_FunctionConstructing == 0x1);
  masm.andPtr(Imm32(0x1), outputScratch);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
  return true;
}
    775 
// Loads a constant string pointer from stub data and boxes it as a string
// Value in the output register.
bool BaselineCacheIRCompiler::emitLoadConstantStringResult(uint32_t strOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadPtr(stubAddress(strOffset), scratch);
  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  return true;
}
    785 
// Compares two strings with the relational/equality operator |op|. Fast
// path is masm.compareStrings; the slow path enters a stub frame and calls
// the matching VM comparison function, mapping Le/Gt onto their mirrored
// Ge/Lt counterparts by swapping operands.
bool BaselineCacheIRCompiler::emitCompareStringResult(JSOp op,
                                                      StringOperandId lhsId,
                                                      StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, lhsId);
  Register right = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  allocator.discardStack(masm);

  Label slow, done;
  masm.compareStrings(op, left, right, scratch, &slow);
  masm.jump(&done);
  masm.bind(&slow);
  {
    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
    // - |left <= right| is implemented as |right >= left|.
    // - |left > right| is implemented as |right < left|.
    if (op == JSOp::Le || op == JSOp::Gt) {
      masm.Push(left);
      masm.Push(right);
    } else {
      masm.Push(right);
      masm.Push(left);
    }

    using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
    if (op == JSOp::Eq || op == JSOp::StrictEq) {
      callVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(masm);
    } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
      callVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(masm);
    } else if (op == JSOp::Lt || op == JSOp::Gt) {
      callVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(masm);
    } else {
      MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
      callVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(masm);
    }

    stubFrame.leave(masm);
    // The VM call's boolean result.
    masm.storeCallPointerResult(scratch);
  }
  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
    837 
// Implements SameValue(lhs, rhs): a bitwise-identity fast path with a VM-call
// fallback for values that are SameValue-equal but not bit-identical.
bool BaselineCacheIRCompiler::emitSameValueResult(ValOperandId lhsId,
                                                  ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch(allocator, masm);
  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
#ifdef JS_CODEGEN_X86
  // Use the output to avoid running out of registers.
  allocator.copyToScratchValueRegister(masm, rhsId, output.valueReg());
  ValueOperand rhs = output.valueReg();
#else
  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
#endif

  // The VM call below can't recover the IC's stack values; drop them now.
  allocator.discardStack(masm);

  Label done;
  Label call;

  // Check to see if the values have identical bits.
  // This is correct for SameValue because SameValue(NaN,NaN) is true,
  // and SameValue(0,-0) is false.
  masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
                &call);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&call);

    // Bit patterns differ, but the values may still be SameValue-equal
    // (e.g. the same number in different representations); ask the VM.
    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.pushValue(lhs);
    masm.pushValue(rhs);

    using Fn = bool (*)(JSContext*, const Value&, const Value&, bool*);
    callVM<Fn, SameValue>(masm);

    stubFrame.leave(masm);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
    885 
// Shared code for StoreFixedSlot/StoreDynamicSlot: store |rhsId|'s value into
// an existing object slot. The stub field at |offsetOffset| holds the slot's
// byte offset — relative to the object itself for fixed slots, or to the
// dynamic slots array otherwise.
bool BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed,
                                                  ObjOperandId objId,
                                                  uint32_t offsetOffset,
                                                  ValOperandId rhsId) {
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister scratch1(allocator, masm);
  // Dynamic slots need a second scratch to hold the slots pointer.
  Maybe<AutoScratchRegister> scratch2;
  if (!isFixed) {
    scratch2.emplace(allocator, masm);
  }

  // Load the slot's byte offset from the stub data.
  Address offsetAddr = stubAddress(offsetOffset);
  masm.load32(offsetAddr, scratch1);

  if (isFixed) {
    BaseIndex slot(obj, scratch1, TimesOne);
    // Pre-barrier on the old slot value before overwriting it.
    EmitPreBarrier(masm, slot, MIRType::Value);
    masm.storeValue(val, slot);
  } else {
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2.ref());
    BaseIndex slot(scratch2.ref(), scratch1, TimesOne);
    EmitPreBarrier(masm, slot, MIRType::Value);
    masm.storeValue(val, slot);
  }

  // GC post-write barrier for the newly stored value.
  emitPostBarrierSlot(obj, val, scratch1);
  return true;
}
    916 
    917 bool BaselineCacheIRCompiler::emitStoreFixedSlot(ObjOperandId objId,
    918                                                 uint32_t offsetOffset,
    919                                                 ValOperandId rhsId) {
    920  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
    921  return emitStoreSlotShared(true, objId, offsetOffset, rhsId);
    922 }
    923 
    924 bool BaselineCacheIRCompiler::emitStoreDynamicSlot(ObjOperandId objId,
    925                                                   uint32_t offsetOffset,
    926                                                   ValOperandId rhsId) {
    927  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
    928  return emitStoreSlotShared(false, objId, offsetOffset, rhsId);
    929 }
    930 
// Shared code for the AddAndStore*Slot/AllocateAndStoreDynamicSlot ops: add a
// new property by updating the object's shape and storing |rhsId|'s value in
// the new slot. |numNewSlotsOffset| is only provided for
// AllocateAndStoreDynamicSlot, which must grow the dynamic slots array first.
// |preserveWrapper| marks a DOM wrapper as preserved before mutating it.
bool BaselineCacheIRCompiler::emitAddAndStoreSlotShared(
    CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
    uint32_t newShapeOffset, Maybe<uint32_t> numNewSlotsOffset,
    bool preserveWrapper) {
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  Address newShapeAddr = stubAddress(newShapeOffset);
  Address offsetAddr = stubAddress(offsetOffset);

  FailurePath* failure = nullptr;
  if (preserveWrapper) {
    if (!addFailurePath(&failure)) {
      return false;
    }
    // preserveWrapper clobbers volatile registers; keep the scratches out of
    // the save set since scratch1 receives the success flag.
    LiveRegisterSet save = liveVolatileRegs();
    save.takeUnchecked(scratch1);
    save.takeUnchecked(scratch2);
    masm.preserveWrapper(obj, scratch1, scratch2, save);
    masm.branchIfFalseBool(scratch1, failure->label());
  }

  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    // We have to (re)allocate dynamic slots. Do this first, as it's the
    // only fallible operation here. Note that growSlotsPure is fallible but
    // does not GC.
    Address numNewSlotsAddr = stubAddress(*numNewSlotsOffset);

    // A failure path may already exist from the preserveWrapper case above.
    if (!failure && !addFailurePath(&failure)) {
      return false;
    }

    LiveRegisterSet save = liveVolatileRegs();
    masm.PushRegsInMask(save);

    using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.load32(numNewSlotsAddr, scratch2);
    masm.passABIArg(scratch2);
    masm.callWithABI<Fn, NativeObject::growSlotsPure>();
    masm.storeCallPointerResult(scratch1);

    // Restore the saved registers, but keep the call's result in scratch1.
    LiveRegisterSet ignore;
    ignore.add(scratch1);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.branchIfFalseBool(scratch1, failure->label());
  }

  // Update the object's shape.
  masm.loadPtr(newShapeAddr, scratch1);
  masm.storeObjShape(scratch1, obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  masm.load32(offsetAddr, scratch1);
  if (op == CacheOp::AddAndStoreFixedSlot) {
    BaseIndex slot(obj, scratch1, TimesOne);
    masm.storeValue(val, slot);
  } else {
    MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
               op == CacheOp::AllocateAndStoreDynamicSlot);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
    BaseIndex slot(scratch2, scratch1, TimesOne);
    masm.storeValue(val, slot);
  }

  // GC post-write barrier for the newly stored value.
  emitPostBarrierSlot(obj, val, scratch1);
  return true;
}
   1010 
   1011 bool BaselineCacheIRCompiler::emitAddAndStoreFixedSlot(ObjOperandId objId,
   1012                                                       uint32_t offsetOffset,
   1013                                                       ValOperandId rhsId,
   1014                                                       uint32_t newShapeOffset,
   1015                                                       bool preserveWrapper) {
   1016  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1017  Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
   1018  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot, objId,
   1019                                   offsetOffset, rhsId, newShapeOffset,
   1020                                   numNewSlotsOffset, preserveWrapper);
   1021 }
   1022 
   1023 bool BaselineCacheIRCompiler::emitAddAndStoreDynamicSlot(
   1024    ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
   1025    uint32_t newShapeOffset, bool preserveWrapper) {
   1026  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1027  Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
   1028  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot, objId,
   1029                                   offsetOffset, rhsId, newShapeOffset,
   1030                                   numNewSlotsOffset, preserveWrapper);
   1031 }
   1032 
   1033 bool BaselineCacheIRCompiler::emitAllocateAndStoreDynamicSlot(
   1034    ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
   1035    uint32_t newShapeOffset, uint32_t numNewSlotsOffset, bool preserveWrapper) {
   1036  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1037  return emitAddAndStoreSlotShared(
   1038      CacheOp::AllocateAndStoreDynamicSlot, objId, offsetOffset, rhsId,
   1039      newShapeOffset, mozilla::Some(numNewSlotsOffset), preserveWrapper);
   1040 }
   1041 
// Implements Array.isArray: true for ArrayObjects and for proxies that wrap
// an Array (the proxy case goes through a VM call).
bool BaselineCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  allocator.discardStack(masm);

  Label isNotArray;
  // Primitives are never Arrays.
  masm.fallibleUnboxObject(val, scratch1, &isNotArray);

  Label isArray;
  masm.branchTestObjClass(Assembler::Equal, scratch1, &ArrayObject::class_,
                          scratch2, scratch1, &isArray);

  // isArray can also return true for Proxy wrapped Arrays.
  masm.branchTestObjectIsProxy(false, scratch1, scratch2, &isNotArray);
  Label done;
  {
    // Proxy case: let the VM look through the wrapper.
    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch2);

    masm.Push(scratch1);

    using Fn = bool (*)(JSContext*, HandleObject, bool*);
    callVM<Fn, js::IsArrayFromJit>(masm);

    stubFrame.leave(masm);

    masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
    masm.jump(&done);
  }

  masm.bind(&isNotArray);
  masm.moveValue(BooleanValue(false), output.valueReg());
  masm.jump(&done);

  masm.bind(&isArray);
  masm.moveValue(BooleanValue(true), output.valueReg());

  masm.bind(&done);
  return true;
}
   1089 
// Tests whether |objId| is a TypedArray. With |isPossiblyWrapped|, objects
// behind a cross-compartment wrapper are also checked via a VM call.
bool BaselineCacheIRCompiler::emitIsTypedArrayResult(ObjOperandId objId,
                                                     bool isPossiblyWrapped) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  allocator.discardStack(masm);

  Label notTypedArray, isWrapper, done;
  // Fast path: check the object's class directly.
  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotTypedArray(scratch, &notTypedArray);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&notTypedArray);
  if (isPossiblyWrapped) {
    // Detect cross-compartment wrappers (proxies of the Wrapper family) and
    // divert them to the VM call below.
    Label notProxy;
    masm.branchTestClassIsProxy(false, scratch, &notProxy);
    masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
                                      &Wrapper::family, &isWrapper);
    masm.bind(&notProxy);
  }
  masm.moveValue(BooleanValue(false), output.valueReg());

  if (isPossiblyWrapped) {
    masm.jump(&done);

    // Wrapper case: the VM unwraps and checks the target.
    masm.bind(&isWrapper);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(obj);

    using Fn = bool (*)(JSContext*, JSObject*, bool*);
    callVM<Fn, jit::IsPossiblyWrappedTypedArray>(masm);

    stubFrame.leave(masm);

    masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
   1137 
// Load the character of |strId| at |indexId| as a string (or |undefined|).
// |outOfBounds| selects how an out-of-range index is handled:
// - Failure: jump to the failure path (stub bails out).
// - EmptyString: the result is the empty string.
// - UndefinedValue: the result is |undefined|.
bool BaselineCacheIRCompiler::emitLoadStringCharResult(
    StringOperandId strId, Int32OperandId indexId,
    StringCharOutOfBounds outOfBounds) {
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  // Bounds check, load string char.
  Label done;
  Label tagResult;
  Label loadFailed;
  if (outOfBounds == StringCharOutOfBounds::Failure) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    // Both the bounds check and the char load bail to the failure path.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, failure->label());
    masm.loadStringChar(str, index, scratch2, scratch1, scratch3,
                        failure->label());

    allocator.discardStack(masm);
  } else {
    // Discard the stack before jumping to |done|.
    allocator.discardStack(masm);

    if (outOfBounds == StringCharOutOfBounds::EmptyString) {
      // Return the empty string for out-of-bounds access.
      masm.movePtr(ImmGCPtr(cx_->names().empty_), scratch1);
    } else {
      // Return |undefined| for out-of-bounds access.
      masm.moveValue(UndefinedValue(), output.valueReg());
    }

    // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
    // guaranteed to see no nested ropes.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, &done);
    masm.loadStringChar(str, index, scratch2, scratch1, scratch3, &loadFailed);
  }

  // Load StaticString for this char. For larger code units perform a VM call.
  Label vmCall;
  masm.lookupStaticString(scratch2, scratch1, cx_->staticStrings(), &vmCall);
  masm.jump(&tagResult);

  if (outOfBounds != StringCharOutOfBounds::Failure) {
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringChar can't fail for linear strings");
  }

  {
    masm.bind(&vmCall);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch3);

    // Pass the char code (in scratch2) to the VM.
    masm.Push(scratch2);

    using Fn = JSLinearString* (*)(JSContext*, int32_t);
    callVM<Fn, js::StringFromCharCode>(masm);

    stubFrame.leave(masm);

    masm.storeCallPointerResult(scratch1);
  }

  if (outOfBounds != StringCharOutOfBounds::UndefinedValue) {
    // Every path (including out-of-bounds) ends with a string in scratch1,
    // so |tagResult| and |done| can share the tagging code.
    masm.bind(&tagResult);
    masm.bind(&done);
    masm.tagValue(JSVAL_TYPE_STRING, scratch1, output.valueReg());
  } else {
    // The out-of-bounds path already wrote |undefined| to the output, so it
    // must skip the string tagging and jump straight to |done|.
    masm.bind(&tagResult);
    masm.tagValue(JSVAL_TYPE_STRING, scratch1, output.valueReg());
    masm.bind(&done);
  }
  return true;
}
   1220 
   1221 bool BaselineCacheIRCompiler::emitLoadStringCharResult(StringOperandId strId,
   1222                                                       Int32OperandId indexId,
   1223                                                       bool handleOOB) {
   1224  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1225  auto outOfBounds = handleOOB ? StringCharOutOfBounds::EmptyString
   1226                               : StringCharOutOfBounds::Failure;
   1227  return emitLoadStringCharResult(strId, indexId, outOfBounds);
   1228 }
   1229 
   1230 bool BaselineCacheIRCompiler::emitLoadStringAtResult(StringOperandId strId,
   1231                                                     Int32OperandId indexId,
   1232                                                     bool handleOOB) {
   1233  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1234  auto outOfBounds = handleOOB ? StringCharOutOfBounds::UndefinedValue
   1235                               : StringCharOutOfBounds::Failure;
   1236  return emitLoadStringCharResult(strId, indexId, outOfBounds);
   1237 }
   1238 
   1239 bool BaselineCacheIRCompiler::emitStringFromCodeResult(Int32OperandId codeId,
   1240                                                       StringCode stringCode) {
   1241  AutoOutputRegister output(*this);
   1242  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   1243 
   1244  Register code = allocator.useRegister(masm, codeId);
   1245 
   1246  FailurePath* failure = nullptr;
   1247  if (stringCode == StringCode::CodePoint) {
   1248    if (!addFailurePath(&failure)) {
   1249      return false;
   1250    }
   1251  }
   1252 
   1253  if (stringCode == StringCode::CodePoint) {
   1254    // Note: This condition must match tryAttachStringFromCodePoint to prevent
   1255    // failure loops.
   1256    masm.branch32(Assembler::Above, code, Imm32(unicode::NonBMPMax),
   1257                  failure->label());
   1258  }
   1259 
   1260  allocator.discardStack(masm);
   1261 
   1262  // We pre-allocate atoms for the first UNIT_STATIC_LIMIT characters.
   1263  // For code units larger than that, we must do a VM call.
   1264  Label vmCall;
   1265  masm.lookupStaticString(code, scratch, cx_->staticStrings(), &vmCall);
   1266 
   1267  Label done;
   1268  masm.jump(&done);
   1269 
   1270  {
   1271    masm.bind(&vmCall);
   1272 
   1273    AutoStubFrame stubFrame(*this);
   1274    stubFrame.enter(masm, scratch);
   1275 
   1276    masm.Push(code);
   1277 
   1278    if (stringCode == StringCode::CodeUnit) {
   1279      using Fn = JSLinearString* (*)(JSContext*, int32_t);
   1280      callVM<Fn, js::StringFromCharCode>(masm);
   1281    } else {
   1282      using Fn = JSLinearString* (*)(JSContext*, char32_t);
   1283      callVM<Fn, js::StringFromCodePoint>(masm);
   1284    }
   1285 
   1286    stubFrame.leave(masm);
   1287    masm.storeCallPointerResult(scratch);
   1288  }
   1289 
   1290  masm.bind(&done);
   1291  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
   1292  return true;
   1293 }
   1294 
   1295 bool BaselineCacheIRCompiler::emitStringFromCharCodeResult(
   1296    Int32OperandId codeId) {
   1297  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1298 
   1299  return emitStringFromCodeResult(codeId, StringCode::CodeUnit);
   1300 }
   1301 
   1302 bool BaselineCacheIRCompiler::emitStringFromCodePointResult(
   1303    Int32OperandId codeId) {
   1304  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1305 
   1306  return emitStringFromCodeResult(codeId, StringCode::CodePoint);
   1307 }
   1308 
// Implements Reflect.getPrototypeOf: returns the object's proto (or null),
// calling into the VM for objects with a lazily-computed proto.
bool BaselineCacheIRCompiler::emitReflectGetPrototypeOfResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register obj = allocator.useRegister(masm, objId);

  allocator.discardStack(masm);

  // The proto word is either a pointer, nullptr, or the LazyProto sentinel
  // (1); the comparisons below rely on that encoding.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  masm.loadObjProto(obj, scratch);

  // Any value above 1 is a real proto object.
  Label hasProto;
  masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);

  // Call into the VM for lazy prototypes.
  Label slow, done;
  masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), &slow);

  // Remaining case: proto is nullptr.
  masm.moveValue(NullValue(), output.valueReg());
  masm.jump(&done);

  masm.bind(&hasProto);
  masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
  masm.jump(&done);

  {
    masm.bind(&slow);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(obj);

    using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
    callVM<Fn, jit::GetPrototypeOf>(masm);

    stubFrame.leave(masm);
  }

  masm.bind(&done);
  return true;
}
   1355 
   1356 bool BaselineCacheIRCompiler::emitHasClassResult(ObjOperandId objId,
   1357                                                 uint32_t claspOffset) {
   1358  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1359 
   1360  AutoOutputRegister output(*this);
   1361  Register obj = allocator.useRegister(masm, objId);
   1362  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   1363 
   1364  Address claspAddr(stubAddress(claspOffset));
   1365  masm.loadObjClassUnsafe(obj, scratch);
   1366  masm.cmpPtrSet(Assembler::Equal, claspAddr, scratch.get(), scratch);
   1367  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
   1368  return true;
   1369 }
   1370 
   1371 bool BaselineCacheIRCompiler::emitHasShapeResult(ObjOperandId objId,
   1372                                                 uint32_t shapeOffset) {
   1373  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1374 
   1375  AutoOutputRegister output(*this);
   1376  Register obj = allocator.useRegister(masm, objId);
   1377  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   1378 
   1379  // Note: no Spectre mitigations are needed here because this shape check only
   1380  // affects correctness.
   1381  Address shapeAddr(stubAddress(shapeOffset));
   1382  masm.loadObjShapeUnsafe(obj, scratch);
   1383  masm.cmpPtrSet(Assembler::Equal, shapeAddr, scratch.get(), scratch);
   1384  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
   1385  return true;
   1386 }
   1387 
// Ensure the string in |str| is an atom, replacing the register's contents
// with the atom. Checks the ATOM_BIT flag and the atomization cache first;
// otherwise calls AtomizeStringNoGC, jumping to |failure| if it returns null.
// Clobbers |temp|.
void BaselineCacheIRCompiler::emitAtomizeString(Register str, Register temp,
                                                Label* failure) {
  Label isAtom, notCachedAtom;
  // Already an atom? Nothing to do.
  masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &isAtom);
  // Try the atomization cache; on a hit |str| now holds the atom.
  masm.tryFastAtomize(str, temp, str, &notCachedAtom);
  masm.jump(&isAtom);
  masm.bind(&notCachedAtom);

  {
    // Slow path: C++ call. Save the volatile registers around the ABI call.
    LiveRegisterSet save = liveVolatileRegs();
    masm.PushRegsInMask(save);

    using Fn = JSAtom* (*)(JSContext * cx, JSString * str);
    masm.setupUnalignedABICall(temp);
    masm.loadJSContext(temp);
    masm.passABIArg(temp);
    masm.passABIArg(str);
    masm.callWithABI<Fn, jit::AtomizeStringNoGC>();
    masm.storeCallPointerResult(temp);

    // Restore registers, keeping the returned atom pointer in |temp|.
    LiveRegisterSet ignore;
    ignore.add(temp);
    masm.PopRegsInMaskIgnore(save, ignore);

    // A null result means atomization failed.
    masm.branchPtr(Assembler::Equal, temp, ImmWord(0), failure);
    masm.mov(temp, str);
  }
  masm.bind(&isAtom);
}
   1418 
// Implements Set.prototype.has for a string key: atomize + hash the string,
// then run the inline hash-table lookup.
bool BaselineCacheIRCompiler::emitSetHasStringResult(ObjOperandId setId,
                                                     StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register str = allocator.useRegister(masm, strId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The lookup uses the atom as the key; atomize (fallible) then hash it.
  emitAtomizeString(str, scratch1, failure->label());
  masm.prepareHashString(str, scratch1, scratch2);

  // Box the atom in the output register to serve as the lookup value; the
  // boolean result (left in scratch2) overwrites the output afterwards.
  masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
   1446 
// Implements Map.prototype.has for a string key: atomize + hash the string,
// then run the inline hash-table lookup.
bool BaselineCacheIRCompiler::emitMapHasStringResult(ObjOperandId mapId,
                                                     StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register str = allocator.useRegister(masm, strId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The lookup uses the atom as the key; atomize (fallible) then hash it.
  emitAtomizeString(str, scratch1, failure->label());
  masm.prepareHashString(str, scratch1, scratch2);

  // Box the atom in the output register to serve as the lookup key; the
  // boolean result (left in scratch2) overwrites the output afterwards.
  masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
   1474 
// Implements Map.prototype.get for a string key: atomize + hash the string,
// then run the inline hash-table lookup, writing the mapped value (or
// undefined) directly into the output.
bool BaselineCacheIRCompiler::emitMapGetStringResult(ObjOperandId mapId,
                                                     StringOperandId strId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register str = allocator.useRegister(masm, strId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The lookup uses the atom as the key; atomize (fallible) then hash it.
  emitAtomizeString(str, scratch1, failure->label());
  masm.prepareHashString(str, scratch1, scratch2);

  // Box the atom in the output register to serve as the lookup key; the
  // looked-up value is then written over it.
  masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}
   1501 
// Call a native (C++) setter on |receiverId| with |rhsId| as the value.
// |sameRealm| and |nargsAndFlagsOffset| are unused in the Baseline
// implementation; the VM call handles realm switching itself.
bool BaselineCacheIRCompiler::emitCallNativeSetter(
    ObjOperandId receiverId, uint32_t setterOffset, ValOperandId rhsId,
    bool sameRealm, uint32_t nargsAndFlagsOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register receiver = allocator.useRegister(masm, receiverId);
  Address setterAddr(stubAddress(setterOffset));
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the callee in the scratch register.
  masm.loadPtr(setterAddr, scratch);

  // Push CallNativeSetter's arguments in reverse order:
  // (setter, receiver, value).
  masm.Push(val);
  masm.Push(receiver);
  masm.Push(scratch);

  using Fn = bool (*)(JSContext*, HandleFunction, HandleObject, HandleValue);
  callVM<Fn, CallNativeSetter>(masm);

  stubFrame.leave(masm);
  return true;
}
   1530 
// Shared code for CallScriptedSetter/CallInlinedSetter: invoke a scripted
// setter with |rhsId| as the single argument and |receiverId| as |this|.
// |icScriptOffset| is present only for the inlined (trial-inlining) variant,
// where the inlined ICScript is pushed and Ion code must be avoided.
bool BaselineCacheIRCompiler::emitCallScriptedSetterShared(
    ObjOperandId receiverId, ObjOperandId calleeId, ValOperandId rhsId,
    bool sameRealm, uint32_t nargsAndFlagsOffset,
    Maybe<uint32_t> icScriptOffset) {
  AutoScratchRegister scratch(allocator, masm);
#if defined(JS_CODEGEN_X86)
  // x86 is register-starved: reuse |scratch| for the jitcode pointer.
  Register code = scratch;
#else
  AutoScratchRegister code(allocator, masm);
#endif

  Register receiver = allocator.useRegister(masm, receiverId);
  Register callee = allocator.useRegister(masm, calleeId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  bool isInlined = icScriptOffset.isSome();

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  if (!sameRealm) {
    masm.switchToObjectRealm(callee, scratch);
  }

  if (isInlined) {
    stubFrame.pushInlinedICScript(masm, stubAddress(*icScriptOffset));
  }

  // The setter is passed exactly one actual argument. If the callee declares
  // more formals, pad the missing ones with |undefined| (underflow case);
  // either way the stack must end up aligned for the JitFrameLayout.
  Label noUnderflow, doneAlignment;
  masm.loadFunctionArgCount(callee, scratch);
  masm.branch32(Assembler::BelowOrEqual, scratch, Imm32(1), &noUnderflow);

  masm.alignJitStackBasedOnNArgs(scratch, /*countIncludesThis*/ false);

  // Push |undefined| until only the one real argument slot remains.
  Label loop;
  masm.bind(&loop);
  masm.Push(UndefinedValue());
  masm.sub32(Imm32(1), scratch);
  masm.branch32(Assembler::Above, scratch, Imm32(1), &loop);
  masm.jump(&doneAlignment);

  // Align the stack such that the JitFrameLayout is aligned on
  // JitStackAlignment.
  masm.bind(&noUnderflow);
  masm.alignJitStackBasedOnNArgs(1, /*countIncludesThis = */ false);
  masm.bind(&doneAlignment);

  // Setter is called with 1 argument, and |receiver| as thisv. Note that we use
  // Push, not push, so that callJit will align the stack properly on ARM.
  masm.Push(val);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(receiver)));

  // Push callee.
  masm.Push(callee);

  // Push frame descriptor.
  masm.Push(
      FrameDescriptor(FrameType::BaselineStub, /* argc = */ 1, isInlined));

  // Load the jitcode pointer.
  Register scratch2 = val.scratchReg();
  if (isInlined) {
    // The inlined path uses the ICScript pushed above, so it must not enter
    // Ion code for the callee.
    masm.loadJitCodeRawNoIon(callee, code, scratch2);
  } else {
    masm.loadJitCodeRaw(callee, code);
  }

  masm.callJit(code);

  stubFrame.leave(masm);

  if (!sameRealm) {
    masm.switchToBaselineFrameRealm(R1.scratchReg());
  }

  return true;
}
   1610 
   1611 bool BaselineCacheIRCompiler::emitCallScriptedSetter(
   1612    ObjOperandId receiverId, ObjOperandId calleeId, ValOperandId rhsId,
   1613    bool sameRealm, uint32_t nargsAndFlagsOffset) {
   1614  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1615  Maybe<uint32_t> icScriptOffset = mozilla::Nothing();
   1616  return emitCallScriptedSetterShared(receiverId, calleeId, rhsId, sameRealm,
   1617                                      nargsAndFlagsOffset, icScriptOffset);
   1618 }
   1619 
   1620 bool BaselineCacheIRCompiler::emitCallInlinedSetter(
   1621    ObjOperandId receiverId, ObjOperandId calleeId, ValOperandId rhsId,
   1622    uint32_t icScriptOffset, bool sameRealm, uint32_t nargsAndFlagsOffset) {
   1623  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1624  return emitCallScriptedSetterShared(receiverId, calleeId, rhsId, sameRealm,
   1625                                      nargsAndFlagsOffset,
   1626                                      mozilla::Some(icScriptOffset));
   1627 }
   1628 
// Call a DOM setter through the CallDOMSetter VM function. |jitInfoOffset| is
// the stub-field offset of the setter's JSJitInfo.
bool BaselineCacheIRCompiler::emitCallDOMSetter(ObjOperandId objId,
                                                uint32_t jitInfoOffset,
                                                ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);
  Address jitInfoAddr(stubAddress(jitInfoOffset));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the JSJitInfo in the scratch register.
  masm.loadPtr(jitInfoAddr, scratch);

  // VM-call arguments are pushed in reverse order of the Fn signature.
  masm.Push(val);
  masm.Push(obj);
  masm.Push(scratch);

  using Fn = bool (*)(JSContext*, const JSJitInfo*, HandleObject, HandleValue);
  callVM<Fn, CallDOMSetter>(masm);

  stubFrame.leave(masm);
  return true;
}
   1657 
// Set an array's |length| property via the SetArrayLength VM function.
// |strict| selects strict-mode semantics for failures.
bool BaselineCacheIRCompiler::emitCallSetArrayLength(ObjOperandId objId,
                                                     bool strict,
                                                     ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // VM-call arguments are pushed in reverse order of the Fn signature.
  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
  callVM<Fn, jit::SetArrayLength>(masm);

  stubFrame.leave(masm);
  return true;
}
   1682 
// Set a property on a proxy with a known jsid (stored as a stub field at
// |idOffset|) by calling the ProxySetProperty VM function.
bool BaselineCacheIRCompiler::emitProxySet(ObjOperandId objId,
                                           uint32_t idOffset,
                                           ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);
  Address idAddr(stubAddress(idOffset));

  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Load the jsid in the scratch register.
  masm.loadPtr(idAddr, scratch);

  // VM-call arguments are pushed in reverse order of the Fn signature.
  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callVM<Fn, ProxySetProperty>(masm);

  stubFrame.leave(masm);
  return true;
}
   1712 
// Set a property on a proxy with a dynamic id Value by calling the
// ProxySetPropertyByValue VM function. Both the id and the rhs occupy Value
// registers, so no scratch register is left; |obj| is spilled to the baseline
// frame's scratch slot across the stub-frame entry.
bool BaselineCacheIRCompiler::emitProxySetByValue(ObjOperandId objId,
                                                  ValOperandId idId,
                                                  ValOperandId rhsId,
                                                  bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  allocator.discardStack(masm);

  // We need a scratch register but we don't have any registers available on
  // x86, so temporarily store |obj| in the frame's scratch slot.
  int scratchOffset = BaselineFrame::reverseOffsetOfScratchValue();
  masm.storePtr(obj, Address(baselineFrameReg(), scratchOffset));

  // |obj| is free to clobber now, so use it as the stub-frame scratch.
  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, obj);

  // Restore |obj|. Because we entered a stub frame we first have to load
  // the original frame pointer.
  masm.loadPtr(Address(FramePointer, 0), obj);
  masm.loadPtr(Address(obj, scratchOffset), obj);

  // VM-call arguments are pushed in reverse order of the Fn signature.
  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, ProxySetPropertyByValue>(masm);

  stubFrame.leave(masm);
  return true;
}
   1748 
// Add or update a sparse element on a native object via the
// AddOrUpdateSparseElementHelper VM function. |idId| is an int32 index.
bool BaselineCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper(
    ObjOperandId objId, Int32OperandId idId, ValOperandId rhsId, bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register id = allocator.useRegister(masm, idId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // VM-call arguments are pushed in reverse order of the Fn signature.
  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(id);
  masm.Push(obj);

  using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
                      HandleValue v, bool strict);
  callVM<Fn, AddOrUpdateSparseElementHelper>(masm);

  stubFrame.leave(masm);
  return true;
}
   1774 
// Megamorphic SetElem fallback: call the SetElementMegamorphic VM function
// with a dynamic id Value. On x86 there is no free scratch register (both id
// and rhs occupy Value registers), so |obj| is spilled to the baseline
// frame's scratch slot across the stub-frame entry, mirroring
// emitProxySetByValue.
bool BaselineCacheIRCompiler::emitMegamorphicSetElement(ObjOperandId objId,
                                                        ValOperandId idId,
                                                        ValOperandId rhsId,
                                                        bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

#ifdef JS_CODEGEN_X86
  allocator.discardStack(masm);
  // We need a scratch register but we don't have any registers available on
  // x86, so temporarily store |obj| in the frame's scratch slot.
  int scratchOffset = BaselineFrame::reverseOffsetOfScratchValue();
  masm.storePtr(obj, Address(baselineFrameReg_, scratchOffset));

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, obj);

  // Restore |obj|. Because we entered a stub frame we first have to load
  // the original frame pointer.
  masm.loadPtr(Address(FramePointer, 0), obj);
  masm.loadPtr(Address(obj, scratchOffset), obj);
#else
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);
  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);
#endif

  // VM-call arguments are pushed in reverse order of the Fn signature.
  masm.Push(Imm32(strict));
  masm.Push(val);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
  callVM<Fn, SetElementMegamorphic<false>>(masm);

  stubFrame.leave(masm);
  return true;
}
   1817 
// Emit the IC epilogue: discard any stack state the allocator tracked, pop
// the IC frame registers if IC frame pointers are enabled, and return to the
// caller of the IC.
bool BaselineCacheIRCompiler::emitReturnFromIC() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  allocator.discardStack(masm);
  if (JitOptions.enableICFramePointers) {
    PopICFrameRegs(masm);
  }
  EmitReturnFromIC(masm);
  return true;
}
   1827 
   1828 bool BaselineCacheIRCompiler::emitLoadArgumentFixedSlot(ValOperandId resultId,
   1829                                                        uint8_t slotIndex) {
   1830  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1831  ValueOperand resultReg = allocator.defineValueRegister(masm, resultId);
   1832  Address addr = allocator.addressOf(masm, BaselineFrameSlot(slotIndex));
   1833  masm.loadValue(addr, resultReg);
   1834  return true;
   1835 }
   1836 
   1837 bool BaselineCacheIRCompiler::emitLoadArgumentDynamicSlot(ValOperandId resultId,
   1838                                                          Int32OperandId argcId,
   1839                                                          uint8_t slotIndex) {
   1840  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1841  ValueOperand resultReg = allocator.defineValueRegister(masm, resultId);
   1842  Register argcReg = allocator.useRegister(masm, argcId);
   1843  BaseValueIndex addr =
   1844      allocator.addressOf(masm, argcReg, BaselineFrameSlot(slotIndex));
   1845  masm.loadValue(addr, resultReg);
   1846  return true;
   1847 }
   1848 
// Guard that a DOM proxy's expando Value is either absent (undefined) or an
// object whose shape matches the shape stored at stub field |shapeOffset|.
// Fails the stub otherwise.
bool BaselineCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape(
    ValOperandId expandoId, uint32_t shapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  ValueOperand val = allocator.useValueRegister(masm, expandoId);
  AutoScratchRegister shapeScratch(allocator, masm);
  AutoScratchRegister objScratch(allocator, masm);
  Address shapeAddr(stubAddress(shapeOffset));

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Missing expando: nothing to guard.
  Label done;
  masm.branchTestUndefined(Assembler::Equal, val, &done);

  masm.debugAssertIsObject(val);
  masm.loadPtr(shapeAddr, shapeScratch);
  masm.unboxObject(val, objScratch);
  // The expando object is not used in this case, so we don't need Spectre
  // mitigations.
  masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
                                              shapeScratch, failure->label());

  masm.bind(&done);
  return true;
}
   1876 
// Load a DOM proxy's expando Value while guarding that both the
// ExpandoAndGeneration pointer and its generation counter match the values
// recorded in the stub fields. Fails the stub on any mismatch.
bool BaselineCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration(
    ObjOperandId objId, uint32_t expandoAndGenerationOffset,
    uint32_t generationOffset, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Address expandoAndGenerationAddr(stubAddress(expandoAndGenerationOffset));
  Address generationAddr(stubAddress(generationOffset));

  AutoScratchRegister scratch(allocator, masm);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The ExpandoAndGeneration* lives in the proxy's private reserved slot.
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address expandoAddr(scratch,
                      js::detail::ProxyReservedSlots::offsetOfPrivateSlot());

  // Load the ExpandoAndGeneration* in the output scratch register and guard
  // it matches the proxy's ExpandoAndGeneration.
  masm.loadPtr(expandoAndGenerationAddr, output.scratchReg());
  masm.branchPrivatePtr(Assembler::NotEqual, expandoAddr, output.scratchReg(),
                        failure->label());

  // Guard expandoAndGeneration->generation matches the expected generation.
  masm.branch64(
      Assembler::NotEqual,
      Address(output.scratchReg(), ExpandoAndGeneration::offsetOfGeneration()),
      generationAddr, scratch, failure->label());

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(
      Address(output.scratchReg(), ExpandoAndGeneration::offsetOfExpando()),
      output);
  return true;
}
   1915 
// Set up the register allocator for compiling a stub of the given kind:
// assign the IC's input operands to their baseline locations (R0/R1 or frame
// slots), record the output register, and compute the set of available
// general-purpose registers.
bool BaselineCacheIRCompiler::init(CacheKind kind) {
  if (!allocator.init()) {
    return false;
  }

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(numInputs == NumInputsForCacheKind(kind));

  // Baseline passes the first 2 inputs in R0/R1, other Values are stored on
  // the stack.
  size_t numInputsInRegs = std::min(numInputs, size_t(2));
  AllocatableGeneralRegisterSet available =
      BaselineICAvailableGeneralRegs(numInputsInRegs);

  switch (kind) {
    case CacheKind::NewArray:
    case CacheKind::NewObject:
    case CacheKind::Lambda:
    case CacheKind::LazyConstant:
    case CacheKind::GetImport:
      // No inputs; only an output Value in R0.
      MOZ_ASSERT(numInputs == 0);
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::GetProp:
    case CacheKind::TypeOf:
    case CacheKind::TypeOfEq:
    case CacheKind::ToPropertyKey:
    case CacheKind::GetIterator:
    case CacheKind::OptimizeSpreadCall:
    case CacheKind::OptimizeGetIterator:
    case CacheKind::ToBool:
    case CacheKind::UnaryArith:
      // One Value input in R0; the output reuses R0.
      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, R0);
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::Compare:
    case CacheKind::GetElem:
    case CacheKind::GetPropSuper:
    case CacheKind::In:
    case CacheKind::HasOwn:
    case CacheKind::CheckPrivateField:
    case CacheKind::InstanceOf:
    case CacheKind::BinaryArith:
      // Two Value inputs in R0/R1; the output reuses R0.
      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, R0);
      allocator.initInputLocation(1, R1);
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::SetProp:
      // Two Value inputs; no output Value.
      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, R0);
      allocator.initInputLocation(1, R1);
      break;
    case CacheKind::GetElemSuper:
      // Three inputs: the receiver lives in a frame slot, the others in
      // registers.
      MOZ_ASSERT(numInputs == 3);
      allocator.initInputLocation(0, BaselineFrameSlot(0));
      allocator.initInputLocation(1, R1);
      allocator.initInputLocation(2, R0);
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::SetElem:
      // Three inputs: the rhs Value lives in a frame slot.
      MOZ_ASSERT(numInputs == 3);
      allocator.initInputLocation(0, R0);
      allocator.initInputLocation(1, R1);
      allocator.initInputLocation(2, BaselineFrameSlot(0));
      break;
    case CacheKind::GetName:
    case CacheKind::BindName:
      // Unboxed object input in R0's payload register.
      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_OBJECT);
#if defined(JS_NUNBOX32)
      // availableGeneralRegs can't know that GetName/BindName is only using
      // the payloadReg and not typeReg on x86.
      available.add(R0.typeReg());
#endif
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::Call:
      // The argc int32 input lives in R0's payload register.
      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_INT32);
#if defined(JS_NUNBOX32)
      // availableGeneralRegs can't know that Call is only using
      // the payloadReg and not typeReg on x86.
      available.add(R0.typeReg());
#endif
      outputUnchecked_.emplace(R0);
      break;
    case CacheKind::CloseIter:
      // Unboxed iterator object input; no output Value.
      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_OBJECT);
#if defined(JS_NUNBOX32)
      // availableGeneralRegs can't know that CloseIter is only using
      // the payloadReg and not typeReg on x86.
      available.add(R0.typeReg());
#endif
      break;
  }

  // Baseline doesn't allocate float registers so none of them are live.
  liveFloatRegs_ = LiveFloatRegisterSet(FloatRegisterSet());

  // When IC frame pointers are enabled, reserve a register to hold the
  // baseline frame pointer for the duration of the stub.
  if (JitOptions.enableICFramePointers) {
    baselineFrameReg_ = available.takeAny();
  }

  allocator.initAvailableRegs(available);
  return true;
}
   2025 
   2026 static void ResetEnteredCounts(const ICEntry* icEntry) {
   2027  ICStub* stub = icEntry->firstStub();
   2028  while (true) {
   2029    stub->resetEnteredCount();
   2030    if (stub->isFallback()) {
   2031      return;
   2032    }
   2033    stub = stub->toCacheIRStub()->next();
   2034  }
   2035 }
   2036 
   2037 #ifdef ENABLE_JS_AOT_ICS
// Called when AOT IC enforcement encounters an IC body that is not in the
// ahead-of-time corpus: dump the CacheIR to a uniquely-named file so it can
// be added to js/src/ics/, then abort (unless AOT_ICS_KEEP_GOING is set).
void DumpNonAOTICStubAndQuit(CacheKind kind, const CacheIRWriter& writer) {
  // Generate a random filename (unlikely to conflict with others).
  char filename[64];
  snprintf(filename, sizeof(filename), "IC-%" PRIu64,
           mozilla::RandomUint64OrDie());
  FILE* f = fopen(filename, "w");
  MOZ_RELEASE_ASSERT(f);

  // Generate the CacheIR text to dump to a file.
  // Scoped so the Fprinter is destroyed before we flush and close the file.
  {
    Fprinter printer(f);
    SpewCacheIROpsAsAOT(printer, kind, writer);
  }
  fflush(f);
  fclose(f);
  fprintf(stderr, "UNEXPECTED NEW IC BODY\n");

  fprintf(stderr,
          "Please add the file '%s' to the ahead-of-time known IC bodies in "
          "js/src/ics/.\n"
          "\n"
          "To keep running and dump all new ICs (useful for updating with "
          "test-suites),\n"
          "set the environment variable AOT_ICS_KEEP_GOING=1 and rerun.\n",
          filename);

  // Only abort when the escape-hatch environment variable is not set.
  if (!getenv("AOT_ICS_KEEP_GOING")) {
    abort();
  }
}
   2068 #endif
   2069 
// Stub data is laid out immediately after the ICCacheIRStub header; the
// static_assert guarantees 8-byte alignment for the stub fields.
static constexpr uint32_t StubDataOffset = sizeof(ICCacheIRStub);
static_assert(StubDataOffset % sizeof(uint64_t) == 0,
              "Stub fields must be aligned");
   2073 
// Look up the shared JitCode and CacheIRStubInfo for the CacheIR bytecode in
// |writer|, compiling and caching them on first use. On success |stubInfo| is
// always set; |code| stays null when only the portable baseline interpreter
// is enabled (no native code is generated then). |isAOTFill| is true when
// preloading the AOT IC corpus, which relaxes the stub-data-size assert below.
static bool LookupOrCompileStub(JSContext* cx, CacheKind kind,
                                const CacheIRWriter& writer,
                                CacheIRStubInfo*& stubInfo, JitCode*& code,
                                const char* name, bool isAOTFill,
                                JitZone* jitZone) {
  CacheIRStubKey::Lookup lookup(kind, ICStubEngine::Baseline,
                                writer.codeStart(), writer.codeLength());

  code = jitZone->getBaselineCacheIRStubCode(lookup, &stubInfo);

#ifdef ENABLE_JS_AOT_ICS
  // With AOT enforcement on, any IC body not already in the cache (and not
  // being filled from the corpus) is unexpected: dump it and abort.
  if (JitOptions.enableAOTICEnforce && !stubInfo && !isAOTFill &&
      !jitZone->isIncompleteAOTICs()) {
    DumpNonAOTICStubAndQuit(kind, writer);
  }
#endif

  if (!code && !IsPortableBaselineInterpreterEnabled()) {
    // We have to generate stub code.
    TempAllocator temp(&cx->tempLifoAlloc());
    JitContext jctx(cx);
    BaselineCacheIRCompiler comp(cx, temp, writer, StubDataOffset);
    if (!comp.init(kind)) {
      return false;
    }

    code = comp.compile();
    if (!code) {
      return false;
    }

    comp.perfSpewer().saveProfile(code, name);

    // Allocate the shared CacheIRStubInfo. Note that the
    // putBaselineCacheIRStubCode call below will transfer ownership
    // to the stub code HashMap, so we don't have to worry about freeing
    // it below.
    MOZ_ASSERT(!stubInfo);
    stubInfo =
        CacheIRStubInfo::New(kind, ICStubEngine::Baseline, comp.makesGCCalls(),
                             StubDataOffset, writer);
    if (!stubInfo) {
      return false;
    }

    CacheIRStubKey key(stubInfo);
    if (!jitZone->putBaselineCacheIRStubCode(lookup, key, code)) {
      return false;
    }
  } else if (!stubInfo) {
    MOZ_ASSERT(IsPortableBaselineInterpreterEnabled());

    // Portable baseline interpreter case. We want to generate the
    // CacheIR bytecode but not compile it to native code.
    //
    // We lie that all stubs make GC calls; this is simpler than
    // iterating over ops to determine if it is actually the base, and
    // we don't invoke the BaselineCacheIRCompiler so we otherwise
    // don't know for sure.
    stubInfo = CacheIRStubInfo::New(kind, ICStubEngine::Baseline,
                                    /* makes GC calls = */ true, StubDataOffset,
                                    writer);
    if (!stubInfo) {
      return false;
    }

    CacheIRStubKey key(stubInfo);
    if (!jitZone->putBaselineCacheIRStubCode(lookup, key,
                                             /* stubCode = */ nullptr)) {
      return false;
    }
  }
  MOZ_ASSERT_IF(IsBaselineInterpreterEnabled(), code);
  MOZ_ASSERT(stubInfo);
  // Assert that the StubInfo recomputing its stub-data size exactly
  // matches the writer's stub-data size, but only if we're not
  // loading an AOT IC -- otherwise, trust the recomputation from
  // field types.
  //
  // Why ignore if AOT? Because the AOT corpus might have been dumped
  // on a machine with a different word size than our machine (e.g.,
  // 64 to 32 bits). The field types are serialized and deserialized,
  // and they are authoritative; the CacheIRWriter's stubDataSize is
  // computed during build and used only for this assert, so it is
  // strictly a redundant check.
  //
  // (This cross-machine movement of the corpus is acceptable/correct
  // because the CacheIR itself, and our encoding of it in the corpus
  // source code, is platform-independent. The worst that happens is
  // that some platforms may not generate all possible ICs for another
  // platform (e.g. due to limited registers on x86-32) but it is always
  // fine not to have an IC preloaded in the corpus.
  MOZ_ASSERT_IF(!isAOTFill, stubInfo->stubDataSize() == writer.stubDataSize());

  return true;
}
   2170 
// Attach a new CacheIR stub (described by |writer|) to the IC chain ending in
// fallback stub |stub|. Handles stub-code lookup/compilation, duplicate
// detection, folding into an existing folded stub, trial-inlining state
// transitions, and finally allocation and insertion of the new stub.
ICAttachResult js::jit::AttachBaselineCacheIRStub(
    JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
    JSScript* outerScript, ICScript* icScript, ICFallbackStub* stub,
    const char* name) {
  // We shouldn't GC or report OOM (or any other exception) here.
  AutoAssertNoPendingException aanpe(cx);
  JS::AutoCheckCannotGC nogc;

  if (writer.tooLarge()) {
    cx->runtime()->setUseCounter(cx->global(), JSUseCounter::IC_STUB_TOO_LARGE);
    return ICAttachResult::TooLarge;
  }
  if (writer.oom()) {
    cx->runtime()->setUseCounter(cx->global(), JSUseCounter::IC_STUB_OOM);
    return ICAttachResult::OOM;
  }
  MOZ_ASSERT(!writer.failed());

  // Just a sanity check: the caller should ensure we don't attach an
  // unlimited number of stubs.
#ifdef DEBUG
  static const size_t MaxOptimizedCacheIRStubs = 16;
  MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs);
#endif

  // Check if we already have JitCode for this stub.
  CacheIRStubInfo* stubInfo;
  JitCode* code;

  if (!LookupOrCompileStub(cx, kind, writer, stubInfo, code, name,
                           /* isAOTFill = */ false, cx->zone()->jitZone())) {
    return ICAttachResult::OOM;
  }

  ICEntry* icEntry = icScript->icEntryForStub(stub);

  // Ensure we don't attach duplicate stubs. This can happen if a stub failed
  // for some reason and the IR generator doesn't check for exactly the same
  // conditions.
  for (ICStub* iter = icEntry->firstStub(); iter != stub;
       iter = iter->toCacheIRStub()->next()) {
    auto otherStub = iter->toCacheIRStub();
    if (otherStub->stubInfo() != stubInfo) {
      continue;
    }
    if (!writer.stubDataEquals(otherStub->stubDataStart())) {
      continue;
    }

    // We found a stub that's exactly the same as the stub we're about to
    // attach. Just return nullptr, the caller should do nothing in this
    // case.
    JitSpew(JitSpew_BaselineICFallback,
            "Tried attaching identical stub for (%s:%u:%u)",
            outerScript->filename(), outerScript->lineno(),
            outerScript->column().oneOriginValue());
    return ICAttachResult::DuplicateStub;
  }

  // Try including this case in an existing folded stub.
  if (stub->mayHaveFoldedStub() &&
      AddToFoldedStub(cx, writer, icScript, stub)) {
    JitSpew(JitSpew_StubFolding,
            "Added to folded stub at offset %u (icScript: %p) (%s:%u:%u)",
            stub->pcOffset(), icScript, outerScript->filename(),
            outerScript->lineno(), outerScript->column().oneOriginValue());

    // Instead of adding a new stub, we have added a new case to an existing
    // folded stub. For invalidating Warp code, there are two cases to consider:
    //
    // (1) If we used MGuardShapeList, we need to invalidate Warp code because
    //     it bakes in the old shape list.
    //
    // (2) If we used MGuardMultipleShapes, we do not need to invalidate Warp,
    //     because the ShapeListObject that stores the cases is shared between
    //     Baseline and Warp.
    //
    // If we have stub folding bailout data stored in the JitZone for this
    // script, this must be case (2). In this case we reset the bailout counter
    // if we have already been transpiled.
    //
    // In both cases we reset the entered count for the fallback stub so that we
    // can still transpile.
    stub->resetEnteredCount();
    JSScript* owningScript = nullptr;
    bool hadGuardMultipleShapesBailout = false;
    if (cx->zone()->jitZone()->hasStubFoldingBailoutData(outerScript)) {
      owningScript = cx->zone()->jitZone()->stubFoldingBailoutOuter();
      hadGuardMultipleShapesBailout = true;
      JitSpew(JitSpew_StubFolding, "Found stub folding bailout outer: %s:%u:%u",
              owningScript->filename(), owningScript->lineno(),
              owningScript->column().oneOriginValue());
    } else {
      // Inlined ICScripts are owned by the root script of the inlining.
      owningScript = icScript->isInlined()
                         ? icScript->inliningRoot()->owningScript()
                         : outerScript;
    }
    cx->zone()->jitZone()->clearStubFoldingBailoutData();
    if (stub->usedByTranspiler() && hadGuardMultipleShapesBailout) {
      if (owningScript->hasIonScript()) {
        owningScript->ionScript()->resetNumFixableBailouts();
      } else if (owningScript->hasJitScript()) {
        owningScript->jitScript()->clearFailedICHash();
      }
    } else {
      // Update the last IC counter if this is not a GuardMultipleShapes bailout
      // from Ion.
      owningScript->updateLastICStubCounter();
    }
    return ICAttachResult::Attached;
  }

  // Time to allocate and attach a new stub.

  size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();

  void* newStubMem = cx->zone()->jitZone()->stubSpace()->alloc(bytesNeeded);
  if (!newStubMem) {
    return ICAttachResult::OOM;
  }

  // Resetting the entered counts on the IC chain makes subsequent reasoning
  // about the chain much easier.
  ResetEnteredCounts(icEntry);

  // Transition the fallback stub's trial-inlining state: attaching a new stub
  // after (monomorphic) inlining means the inlining assumption failed.
  switch (stub->trialInliningState()) {
    case TrialInliningState::Initial:
    case TrialInliningState::Candidate:
      stub->setTrialInliningState(writer.trialInliningState());
      break;
    case TrialInliningState::MonomorphicInlined:
      stub->setTrialInliningState(TrialInliningState::Failure);
      break;
    case TrialInliningState::Inlined:
      stub->setTrialInliningState(TrialInliningState::Failure);
      icScript->removeInlinedChild(stub->pcOffset());
      break;
    case TrialInliningState::Failure:
      break;
  }

  auto newStub = new (newStubMem) ICCacheIRStub(code, stubInfo);
  writer.copyStubData(newStub->stubDataStart());
  newStub->setTypeData(writer.typeData());

#ifdef ENABLE_PORTABLE_BASELINE_INTERP
  // PBL has no native stub code; point the stub at the IC interpreter.
  newStub->updateRawJitCode(pbl::GetICInterpreter());
#endif

  stub->addNewStub(icEntry, newStub);

  JSScript* owningScript = icScript->isInlined()
                               ? icScript->inliningRoot()->owningScript()
                               : outerScript;
  owningScript->updateLastICStubCounter();
  return ICAttachResult::Attached;
}
   2328 
   2329 #ifdef ENABLE_JS_AOT_ICS
   2330 
   2331 #  ifndef ENABLE_PORTABLE_BASELINE_INTERP
   2332 // The AOT loading of ICs doesn't work (yet) in modes with a native
   2333 // JIT enabled because compilation tries to access state that doesn't
   2334 // exist yet (trampolines?) when we create the JitZone.
   2335 #    error AOT ICs are only supported (for now) in PBL builds.
   2336 #  endif
   2337 
   2338 void js::jit::FillAOTICs(JSContext* cx, JitZone* zone) {
   2339  if (JitOptions.enableAOTICs) {
   2340    for (auto& stub : GetAOTStubs()) {
   2341      CacheIRWriter writer(cx, stub);
   2342      if (writer.failed()) {
   2343        zone->setIncompleteAOTICs();
   2344        break;
   2345      }
   2346      CacheIRStubInfo* stubInfo;
   2347      JitCode* code;
   2348      (void)LookupOrCompileStub(cx, stub.kind, writer, stubInfo, code,
   2349                                "aot stub",
   2350                                /* isAOTFill = */ true, zone);
   2351      (void)stubInfo;
   2352      (void)code;
   2353    }
   2354  }
   2355 }
   2356 #endif
   2357 
   2358 uint8_t* ICCacheIRStub::stubDataStart() {
   2359  return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
   2360 }
   2361 
   2362 bool BaselineCacheIRCompiler::emitCallStringObjectConcatResult(
   2363    ValOperandId lhsId, ValOperandId rhsId) {
   2364  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   2365  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
   2366  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
   2367 
   2368  AutoScratchRegister scratch(allocator, masm);
   2369 
   2370  allocator.discardStack(masm);
   2371 
   2372  AutoStubFrame stubFrame(*this);
   2373  stubFrame.enter(masm, scratch);
   2374 
   2375  masm.pushValue(rhs);
   2376  masm.pushValue(lhs);
   2377 
   2378  using Fn = bool (*)(JSContext*, HandleValue, HandleValue, MutableHandleValue);
   2379  callVM<Fn, DoConcatStringObject>(masm);
   2380 
   2381  stubFrame.leave(masm);
   2382  return true;
   2383 }
   2384 
// The value of argc entering the call IC is not always the value of
// argc entering the callee. (For example, argc for a spread call IC
// is always 1, but argc for the callee is the length of the array.)
// In these cases, we update argc as part of the call op itself, to
// avoid modifying input operands while it is still possible to fail a
// guard. We also limit callee argc to a reasonable value to avoid
// blowing the stack limit.
//
// Returns false only on OOM while allocating a failure path.
bool BaselineCacheIRCompiler::updateArgc(CallFlags flags, Register argcReg,
                                         uint32_t argcFixed, Register scratch) {
  CallFlags::ArgFormat format = flags.getArgFormat();
  switch (format) {
    case CallFlags::Standard:
      // Standard calls have no extra guards, and argc is already correct.
      return true;
    case CallFlags::FunCall:
      // One argument to fun_call will become |this|. If there are no arguments
      // to a fun_call, we will push an extra undefined.
      if (argcFixed > 0) {
#ifdef DEBUG
        Label nonZeroArgs;
        masm.branchTest32(Assembler::NonZero, argcReg, argcReg, &nonZeroArgs);
        masm.assumeUnreachable("non-zero argcFixed implies non-zero argc");
        masm.bind(&nonZeroArgs);
#endif
        masm.sub32(Imm32(1), argcReg);
      }
      return true;
    case CallFlags::FunApplyNullUndefined:
      // argc must be 0 if null or undefined is passed as second argument to
      // |apply|.
      masm.move32(Imm32(0), argcReg);
      return true;
    default:
      // Remaining formats need a dynamic length check; fall through.
      break;
  }

  // We need to guard the length of the arguments.
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load callee argc into scratch.
  switch (flags.getArgFormat()) {
    case CallFlags::Spread:
    case CallFlags::FunApplyArray: {
      // Load the length of the elements.
      BaselineFrameSlot slot(flags.isConstructing());
      masm.unboxObject(allocator.addressOf(masm, slot), scratch);
      masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()), scratch);
      masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
      break;
    }
    case CallFlags::FunApplyArgsObj: {
      // Load the arguments object length; jumps to the failure path if the
      // length cannot be read directly (see loadArgumentsObjectLength).
      BaselineFrameSlot slot(0);
      masm.unboxObject(allocator.addressOf(masm, slot), scratch);
      masm.loadArgumentsObjectLength(scratch, scratch, failure->label());
      break;
    }
    default:
      MOZ_CRASH("Unknown arg format");
  }

  // Ensure that callee argc does not exceed the limit.
  masm.branch32(Assembler::Above, scratch, Imm32(JIT_ARGS_LENGTH_MAX),
                failure->label());

  // We're past the final guard. Update argc with the new value.
  masm.move32(scratch, argcReg);

  return true;
}
   2458 
   2459 void BaselineCacheIRCompiler::pushArguments(Register argcReg,
   2460                                            Register calleeReg,
   2461                                            Register scratch, Register scratch2,
   2462                                            CallFlags flags, uint32_t argcFixed,
   2463                                            bool isJitCall) {
   2464  if (isJitCall) {
   2465    // If we're calling jitcode, we have to align the stack and ensure that
   2466    // enough arguments are being passed, filling in any missing arguments
   2467    // with `undefined`. `newTarget` should be pushed after alignment padding
   2468    // but before the `undefined` values, so we also handle it here.
   2469    prepareForArguments(argcReg, calleeReg, scratch, scratch2, flags,
   2470                        argcFixed);
   2471  } else if (flags.isConstructing()) {
   2472    // If we're not calling jitcode, push newTarget now so that the shared
   2473    // paths below can assume it's already pushed.
   2474    pushNewTarget();
   2475  }
   2476 
   2477  switch (flags.getArgFormat()) {
   2478    case CallFlags::Standard:
   2479      pushStandardArguments(argcReg, scratch, scratch2, argcFixed, isJitCall,
   2480                            flags.isConstructing());
   2481      break;
   2482    case CallFlags::Spread:
   2483      pushArrayArguments(argcReg, scratch, scratch2, isJitCall,
   2484                         flags.isConstructing());
   2485      break;
   2486    case CallFlags::FunCall:
   2487      pushFunCallArguments(argcReg, calleeReg, scratch, scratch2, argcFixed,
   2488                           isJitCall);
   2489      break;
   2490    case CallFlags::FunApplyArgsObj:
   2491      pushFunApplyArgsObj(argcReg, calleeReg, scratch, scratch2, isJitCall);
   2492      break;
   2493    case CallFlags::FunApplyArray:
   2494      pushArrayArguments(argcReg, scratch, scratch2, isJitCall,
   2495                         /*isConstructing =*/false);
   2496      break;
   2497    case CallFlags::FunApplyNullUndefined:
   2498      pushFunApplyNullUndefinedArguments(calleeReg, isJitCall);
   2499      break;
   2500    default:
   2501      MOZ_CRASH("Invalid arg format");
   2502  }
   2503 }
   2504 
// Align the JIT stack and, on argument underflow, push |undefined| for each
// missing formal argument (pushing newTarget first when constructing).
// Clobbers |scratch|; |argcReg| is left unchanged.
void BaselineCacheIRCompiler::prepareForArguments(
    Register argcReg, Register calleeReg, Register scratch, Register scratch2,
    CallFlags flags, uint32_t argcFixed) {
  bool isConstructing = flags.isConstructing();

  // We will align the stack based on an argument count that doesn't include
  // |this| or |newTarget|. We only care about the parity of the count, so
  // if we are constructing (and therefore passing both) we can compute
  // alignment as if |this| was included.
  bool countIncludesThis = isConstructing;

  // Check for arguments underflow. If we aren't passing enough arguments,
  // fill in missing arguments with `undefined`.
  Label noUnderflow, done;
  masm.loadFunctionArgCount(calleeReg, scratch);
  masm.branch32(Assembler::AboveOrEqual, argcReg, scratch, &noUnderflow);

  // Underflow case: align for the callee's formal argument count (scratch).
  masm.alignJitStackBasedOnNArgs(scratch, countIncludesThis);

  // Push newTarget, if necessary
  if (isConstructing) {
    pushNewTarget();
  }

  // Push `undefined` in a loop until only |argcReg| actual arguments remain
  // to be copied by the caller.
  Label loop;
  masm.bind(&loop);
  masm.Push(UndefinedValue());
  masm.sub32(Imm32(1), scratch);
  masm.branch32(Assembler::Above, scratch, argcReg, &loop);
  masm.jump(&done);

  masm.bind(&noUnderflow);

  // No underflow: if argc is statically known, align with a constant rather
  // than the argc register.
  if (flags.getArgFormat() == CallFlags::Standard &&
      argcFixed < MaxUnrolledArgCopy) {
    masm.alignJitStackBasedOnNArgs(argcFixed, countIncludesThis);
  } else if (flags.getArgFormat() == CallFlags::FunCall &&
             argcFixed < MaxUnrolledArgCopy) {
    // If any arguments are passed, one argument becomes |this|.
    uint32_t actualArgc = argcFixed > 0 ? argcFixed - 1 : 0;
    masm.alignJitStackBasedOnNArgs(actualArgc, countIncludesThis);
  } else {
    masm.alignJitStackBasedOnNArgs(argcReg, countIncludesThis);
  }

  if (isConstructing) {
    pushNewTarget();
  }

  masm.bind(&done);
}
   2557 
// Re-push the caller's |newTarget| value for a constructor call.
void BaselineCacheIRCompiler::pushNewTarget() {
  // When it's time to push `newTarget`, the stack looks like this
  // (higher addresses at the top):
  //
  // .                        .
  // +------------------------+
  // | callee                 |
  // | this                   |
  // | arg0                   | <= pushed on caller's expression stack
  // | arg1                   |
  // | ...                    |
  // | argN                   |
  // | newTarget              | <---- we want to copy this
  // +------------------------+
  // | frame descriptor       | <= BaselineStubFrame
  // | return address         |
  // | caller frame pointer   | <-- frame pointer points here
  // +------------------------+
  // | stub ptr               |
  // | InlinedICScript?       |
  // | (alignment padding?)   | <-- stack pointer points here
  // +------------------------+
  //
  // `newTarget` is the last argument pushed, so it's immediately above the
  // stub frame on the stack.
  masm.pushValue(Address(FramePointer, BaselineStubFrameLayout::Size()));
}
   2585 
   2586 static uint32_t ArgsOffsetFromFP(bool isConstructing) {
   2587  // The arguments are on the stack just above the stub frame. If we are
   2588  // constructing, we have to skip newTarget.
   2589  uint32_t offset = BaselineStubFrameLayout::Size();
   2590  if (isConstructing) {
   2591    offset += sizeof(Value);
   2592  }
   2593  return offset;
   2594 }
   2595 
// Copy the caller's arguments (plus |this|, and the callee for native
// calls) above the stub frame, reversing their order for the target.
void BaselineCacheIRCompiler::pushStandardArguments(
    Register argcReg, Register scratch, Register scratch2, uint32_t argcFixed,
    bool isJitCall, bool isConstructing) {
  MOZ_ASSERT(enteredStubFrame_);

  // The arguments to the call IC were pushed on the stack from left to right,
  // meaning that the first argument is at the highest address and the last
  // argument is at the lowest address. Our callee needs them to be in the
  // opposite order, so we duplicate them now.

  // |this| is always re-pushed; native (non-jit) calls also need the callee.
  int additionalArgc = 1 + !isJitCall;  // this + maybe callee
  uint32_t argsOffset = ArgsOffsetFromFP(isConstructing);

  if (argcFixed < MaxUnrolledArgCopy) {
    // For small argc, we unroll the argument pushing loop.

#ifdef DEBUG
    Label ok;
    masm.branch32(Assembler::Equal, argcReg, Imm32(argcFixed), &ok);
    masm.assumeUnreachable("Invalid argcFixed value");
    masm.bind(&ok);
#endif

    size_t numCopiedValues = argcFixed + additionalArgc;
    for (size_t i = 0; i < numCopiedValues; ++i) {
      masm.pushValue(Address(FramePointer, argsOffset + i * sizeof(Value)));
    }
  } else {
    MOZ_ASSERT(argcFixed == MaxUnrolledArgCopy);

    // Compute pointers to the start and end of the arguments area.
    Register argPtr = scratch;
    Register argEnd = scratch2;
    masm.computeEffectiveAddress(Address(FramePointer, argsOffset), argPtr);
    BaseValueIndex endAddr(FramePointer, argcReg,
                           argsOffset + additionalArgc * sizeof(Value));
    masm.computeEffectiveAddress(endAddr, argEnd);

    // Push all values, starting at the last one.
    // We always push at least one value (`this`), so we don't need
    // a loop guard.
    Label loop;
    masm.bind(&loop);
    {
      masm.pushValue(Address(argPtr, 0));
      masm.addPtr(Imm32(sizeof(Value)), argPtr);
      masm.branchPtr(Assembler::Below, argPtr, argEnd, &loop);
    }
  }
}
   2646 
// Push the elements of an array (spread call or fun_apply with an array)
// as arguments, last-to-first, followed by |this| and (for native calls)
// the callee. Clobbers |scratch| and |scratch2|.
void BaselineCacheIRCompiler::pushArrayArguments(Register argcReg,
                                                 Register scratch,
                                                 Register scratch2,
                                                 bool isJitCall,
                                                 bool isConstructing) {
  MOZ_ASSERT(enteredStubFrame_);

  // If the array is empty, we can skip the loop entirely.
  Label emptyArray;
  masm.branchTest32(Assembler::Zero, argcReg, argcReg, &emptyArray);

  // Pull the array off the stack and load a pointer to its first element.
  Register startReg = scratch;
  size_t arrayOffset = ArgsOffsetFromFP(isConstructing);
  masm.unboxObject(Address(FramePointer, arrayOffset), startReg);
  masm.loadPtr(Address(startReg, NativeObject::offsetOfElements()), startReg);

  // Set up endReg to point to &array[argc - 1].
  Register endReg = scratch2;
  BaseValueIndex endAddr(startReg, argcReg, -int32_t(sizeof(Value)));
  masm.computeEffectiveAddress(endAddr, endReg);

  // Loop to push all arguments. We've already checked for an empty array above.
  Label loop;
  masm.bind(&loop);
  masm.pushValue(Address(endReg, 0));
  masm.subPtr(Imm32(sizeof(Value)), endReg);
  masm.branchPtr(Assembler::AboveOrEqual, endReg, startReg, &loop);

  masm.bind(&emptyArray);

  // Push |this|. It sits one Value above the array on the caller's stack.
  size_t thisvOffset = arrayOffset + sizeof(Value);
  masm.pushValue(Address(FramePointer, thisvOffset));

  // Push |callee| if needed.
  if (!isJitCall) {
    size_t calleeOffset = arrayOffset + 2 * sizeof(Value);
    masm.pushValue(Address(FramePointer, calleeOffset));
  }
}
   2688 
   2689 void BaselineCacheIRCompiler::pushFunApplyNullUndefinedArguments(
   2690    Register calleeReg, bool isJitCall) {
   2691  // argc is already set to 0, so we just have to push |this| and (for native
   2692  // calls) the callee.
   2693 
   2694  MOZ_ASSERT(enteredStubFrame_);
   2695 
   2696  // Push |this|.
   2697  size_t thisvOffset =
   2698      ArgsOffsetFromFP(/*isConstructing*/ false) + sizeof(Value);
   2699  masm.pushValue(Address(FramePointer, thisvOffset));
   2700 
   2701  // Push |callee| if needed.
   2702  if (!isJitCall) {
   2703    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
   2704  }
   2705 }
   2706 
   2707 void BaselineCacheIRCompiler::pushFunCallArguments(
   2708    Register argcReg, Register calleeReg, Register scratch, Register scratch2,
   2709    uint32_t argcFixed, bool isJitCall) {
   2710  if (argcFixed == 0) {
   2711    // Store the new |this|.
   2712    masm.pushValue(UndefinedValue());
   2713 
   2714    // Store |callee| if needed.
   2715    if (!isJitCall) {
   2716      masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
   2717    }
   2718    return;
   2719  }
   2720 
   2721  // When we call fun_call, the stack looks like the left column (note
   2722  // that newTarget will not be present, because fun_call cannot be a
   2723  // constructor call):
   2724  //
   2725  // ***Arguments to fun_call***
   2726  // callee (fun_call)               ***Arguments to target***
   2727  // this (target function)   -----> callee
   2728  // arg0 (this of target)    -----> this
   2729  // arg1 (arg0 of target)    -----> arg0
   2730  // argN (argN-1 of target)  -----> arg1
   2731  //
   2732  // As demonstrated in the right column, this is exactly what we need
   2733  // the stack to look like when calling pushStandardArguments for target,
   2734  // except with one more argument. If we subtract 1 from argc, as we
   2735  // have already done in updateArgc, everything works out correctly.
   2736 
   2737  if (argcFixed != MaxUnrolledArgCopy) {
   2738    argcFixed--;
   2739  }
   2740  pushStandardArguments(argcReg, scratch, scratch2, argcFixed, isJitCall,
   2741                        /*isConstructing =*/false);
   2742 }
   2743 
// Push arguments for |fun.apply(thisArg, arguments)| where the second
// argument is the frame's arguments object: copy the values out of its
// ArgumentsData, last-to-first, then push |this| and (for native calls)
// the callee. Clobbers |scratch| and |scratch2|.
void BaselineCacheIRCompiler::pushFunApplyArgsObj(Register argcReg,
                                                  Register calleeReg,
                                                  Register scratch,
                                                  Register scratch2,
                                                  bool isJitCall) {
  MOZ_ASSERT(enteredStubFrame_);
  // If there are no arguments, we can skip the loop entirely.
  Label emptyArgs;
  masm.branchTest32(Assembler::Zero, argcReg, argcReg, &emptyArgs);

  // Load ArgumentsData
  Register argsReg = scratch;
  uint32_t argsOffset = ArgsOffsetFromFP(/*isConstructing*/ false);
  masm.unboxObject(Address(FramePointer, BaselineStubFrameLayout::Size()),
                   argsReg);
  masm.loadPrivate(Address(argsReg, ArgumentsObject::getDataSlotOffset()),
                   argsReg);

  // We push the arguments onto the stack last-to-first.
  // Compute the bounds of the arguments array.
  Register currReg = scratch2;
  Address argsStartAddr(argsReg, ArgumentsData::offsetOfArgs());
  masm.computeEffectiveAddress(argsStartAddr, argsReg);
  BaseValueIndex argsEndAddr(argsReg, argcReg, -int32_t(sizeof(Value)));
  masm.computeEffectiveAddress(argsEndAddr, currReg);

  // Loop until all arguments have been pushed.
  Label loop;
  masm.bind(&loop);
  Address currArgAddr(currReg, 0);
#ifdef DEBUG
  // Arguments are forwarded to the call object if they are closed over.
  // In this case, OVERRIDDEN_ELEMENTS_BIT should be set.
  Label notForwarded;
  masm.branchTestMagic(Assembler::NotEqual, currArgAddr, &notForwarded);
  masm.assumeUnreachable("Should have checked for overridden elements");
  masm.bind(&notForwarded);
#endif
  masm.pushValue(currArgAddr);
  masm.subPtr(Imm32(sizeof(Value)), currReg);
  masm.branchPtr(Assembler::AboveOrEqual, currReg, argsReg, &loop);

  masm.bind(&emptyArgs);

  // Push arg0 as |this| for call
  masm.pushValue(Address(FramePointer, argsOffset + sizeof(Value)));

  // Push |callee| if needed.
  if (!isJitCall) {
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
  }
}
   2796 
// Push arguments for a call through a BoundFunctionObject: the supplied
// (unbound) arguments first, then the bound arguments, then |this| (the
// bound this, or for constructors the pre-allocated object/magic value).
// For jit calls this also aligns the stack and fills argument underflow
// with |undefined|. Clobbers |scratch| and |scratch2|.
void BaselineCacheIRCompiler::pushBoundFunctionArguments(
    Register argcReg, Register calleeReg, Register scratch, Register scratch2,
    CallFlags flags, uint32_t numBoundArgs, bool isJitCall) {
  bool isConstructing = flags.isConstructing();

  // Calculate total number of actual arguments
  Register countReg = scratch;
  masm.computeEffectiveAddress(Address(argcReg, numBoundArgs), countReg);

  Address boundTarget(calleeReg, BoundFunctionObject::offsetOfTargetSlot());

  if (isJitCall) {
    // We align the jit stack depending on whether the number of arguments
    // is even or odd. To simplify the underflow check, we don't include
    // |this| or (if constructing) |newTarget| in countReg. If we're
    // not constructing, then we should align as if countReg doesn't include
    // |this|. If we're passing both values, then countReg+2 has the same
    // parity as countReg, so we should align as if |this| was included.
    bool countIncludesThis = isConstructing;

    Label noUnderflow, readyForArgs;
    masm.unboxObject(boundTarget, scratch2);
    masm.loadFunctionArgCount(scratch2, scratch2);
    masm.branch32(Assembler::AboveOrEqual, countReg, scratch2, &noUnderflow);

    // Underflow: align for the target's formal count, push newTarget (the
    // bound target), then fill the missing formals with |undefined|.
    masm.alignJitStackBasedOnNArgs(scratch2, countIncludesThis);
    if (isConstructing) {
      masm.pushValue(boundTarget);
    }

    Label undefLoop;
    masm.bind(&undefLoop);
    masm.Push(UndefinedValue());
    masm.sub32(Imm32(1), scratch2);
    masm.branch32(Assembler::Above, scratch2, countReg, &undefLoop);
    masm.jump(&readyForArgs);

    masm.bind(&noUnderflow);
    masm.alignJitStackBasedOnNArgs(countReg, countIncludesThis);
    if (isConstructing) {
      masm.pushValue(boundTarget);
    }
    masm.bind(&readyForArgs);
  } else if (isConstructing) {
    masm.pushValue(boundTarget);
  }

  // Skip the argument loop if no args are being passed.
  Label noArgs;
  masm.branchTest32(Assembler::Zero, argcReg, argcReg, &noArgs);

  // Ensure argPtr initially points to the last argument. Skip the stub frame.
  Register argPtr = scratch2;
  Address argAddress(FramePointer, BaselineStubFrameLayout::Size());
  if (isConstructing) {
    // Skip newTarget.
    argAddress.offset += sizeof(Value);
  }
  masm.computeEffectiveAddress(argAddress, argPtr);

  // Push all supplied arguments, starting at the last one.
  Label argsLoop;
  masm.move32(argcReg, countReg);
  masm.bind(&argsLoop);
  {
    masm.pushValue(Address(argPtr, 0));
    masm.addPtr(Imm32(sizeof(Value)), argPtr);

    masm.branchSub32(Assembler::NonZero, Imm32(1), countReg, &argsLoop);
  }
  masm.bind(&noArgs);

  // Push the bound arguments, starting at the last one.
  constexpr size_t inlineArgsOffset =
      BoundFunctionObject::offsetOfFirstInlineBoundArg();
  if (numBoundArgs <= BoundFunctionObject::MaxInlineBoundArgs) {
    // Bound args are stored inline in the bound function object.
    for (size_t i = 0; i < numBoundArgs; i++) {
      size_t argIndex = numBoundArgs - i - 1;
      Address argAddr(calleeReg, inlineArgsOffset + argIndex * sizeof(Value));
      masm.pushValue(argAddr);
    }
  } else {
    // Bound args are stored out-of-line in an elements array.
    masm.unboxObject(Address(calleeReg, inlineArgsOffset), scratch);
    masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()), scratch);
    for (size_t i = 0; i < numBoundArgs; i++) {
      size_t argIndex = numBoundArgs - i - 1;
      Address argAddr(scratch, argIndex * sizeof(Value));
      masm.pushValue(argAddr);
    }
  }

  if (isConstructing) {
    // Push the |this| Value. This is either the object we allocated or the
    // JS_UNINITIALIZED_LEXICAL magic value. It's stored in the BaselineFrame,
    // so skip past the stub frame, (unbound) arguments and newTarget.
    BaseValueIndex thisAddress(FramePointer, argcReg,
                               BaselineStubFrameLayout::Size() + sizeof(Value));
    masm.pushValue(thisAddress, scratch);
  } else {
    // Push the bound |this|.
    Address boundThis(calleeReg, BoundFunctionObject::offsetOfBoundThisSlot());
    masm.pushValue(boundThis);
  }
}
   2901 
// Shared emission path for calls to native (C++) functions and class hooks.
// |targetOffset| is the stub-field offset of the target pointer (used for
// class hooks and, under the simulator, for redirected natives);
// |ignoresReturnValue| selects the JSJitInfo ignores-rval native variant
// (non-simulator natives only). Returns false only on OOM.
bool BaselineCacheIRCompiler::emitCallNativeShared(
    NativeCallType callType, ObjOperandId calleeId, Int32OperandId argcId,
    CallFlags flags, uint32_t argcFixed, Maybe<bool> ignoresReturnValue,
    Maybe<uint32_t> targetOffset, ClearLocalAllocSite clearLocalAllocSite) {
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Register calleeReg = allocator.useRegister(masm, calleeId);
  Register argcReg = allocator.useRegister(masm, argcId);

  bool isConstructing = flags.isConstructing();
  bool isSameRealm = flags.isSameRealm();

  // Fix up argc for spread/apply-style formats; this may add a failure
  // path, so it must run while guards can still fail.
  if (!updateArgc(flags, argcReg, argcFixed, scratch)) {
    return false;
  }

  allocator.discardStack(masm);

  // Push a stub frame so that we can perform a non-tail call.
  // Note that this leaves the return address in TailCallReg.
  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  if (!isSameRealm) {
    masm.switchToObjectRealm(calleeReg, scratch);
  }

  pushArguments(argcReg, calleeReg, scratch, scratch2, flags, argcFixed,
                /*isJitCall =*/false);

  // Native functions have the signature:
  //
  //    bool (*)(JSContext*, unsigned, Value* vp)
  //
  // Where vp[0] is space for callee/return value, vp[1] is |this|, and vp[2]
  // onward are the function arguments.

  // Initialize vp.
  masm.moveStackPtrTo(scratch2.get());

  // Construct a native exit frame.
  masm.push(argcReg);

  masm.push(FrameDescriptor(FrameType::BaselineStub));
  masm.push(ICTailCallReg);
  masm.push(FramePointer);
  masm.loadJSContext(scratch);
  masm.enterFakeExitFrameForNative(scratch, scratch, isConstructing);

  // Execute call.
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(argcReg);
  masm.passABIArg(scratch2);

  switch (callType) {
    case NativeCallType::Native: {
#ifdef JS_SIMULATOR
      // The simulator requires VM calls to be redirected to a special
      // swi instruction to handle them, so we store the redirected
      // pointer in the stub and use that instead of the original one.
      // (See CacheIRWriter::callNativeFunction.)
      Address redirectedAddr(stubAddress(*targetOffset));
      masm.callWithABI(redirectedAddr);
#else
      if (*ignoresReturnValue) {
        // Call the JSJitInfo variant that doesn't build a return value.
        masm.loadPrivate(
            Address(calleeReg, JSFunction::offsetOfJitInfoOrScript()),
            calleeReg);
        masm.callWithABI(
            Address(calleeReg, JSJitInfo::offsetOfIgnoresReturnValueNative()));
      } else {
        // This depends on the native function pointer being stored unchanged as
        // a PrivateValue.
        masm.callWithABI(Address(calleeReg, JSFunction::offsetOfNativeOrEnv()));
      }
#endif
    } break;
    case NativeCallType::ClassHook: {
      // The hook pointer lives in the stub data.
      Address nativeAddr(stubAddress(*targetOffset));
      masm.callWithABI(nativeAddr);
    } break;
  }

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Load the return value.
  masm.loadValue(
      Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
      output.valueReg());

  stubFrame.leave(masm);

  if (!isSameRealm) {
    masm.switchToBaselineFrameRealm(scratch2);
  }

  // We will also unilaterally clear this on exception handling.
  if (clearLocalAllocSite == ClearLocalAllocSite::Yes) {
    masm.storeLocalAllocSite(ImmPtr(nullptr), scratch2);
  }

  return true;
}
   3010 
   3011 void BaselineCacheIRCompiler::loadAllocSiteIntoContext(uint32_t siteOffset) {
   3012  AutoScratchRegister scratch(allocator, masm);
   3013  AutoScratchRegister site(allocator, masm);
   3014 
   3015  StubFieldOffset siteField(siteOffset, StubField::Type::AllocSite);
   3016  emitLoadStubField(siteField, site);
   3017 
   3018  masm.storeLocalAllocSite(site.get(), scratch);
   3019 }
   3020 
   3021 #ifdef JS_SIMULATOR
   3022 bool BaselineCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
   3023                                                     Int32OperandId argcId,
   3024                                                     CallFlags flags,
   3025                                                     uint32_t argcFixed,
   3026                                                     uint32_t targetOffset) {
   3027  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3028  Maybe<bool> ignoresReturnValue;
   3029  Maybe<uint32_t> targetOffset_ = mozilla::Some(targetOffset);
   3030  return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
   3031                              argcFixed, ignoresReturnValue, targetOffset_);
   3032 }
   3033 
   3034 bool BaselineCacheIRCompiler::emitCallDOMFunction(
   3035    ObjOperandId calleeId, Int32OperandId argcId, ObjOperandId thisObjId,
   3036    CallFlags flags, uint32_t argcFixed, uint32_t targetOffset) {
   3037  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3038  Maybe<bool> ignoresReturnValue;
   3039  Maybe<uint32_t> targetOffset_ = mozilla::Some(targetOffset);
   3040  return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
   3041                              argcFixed, ignoresReturnValue, targetOffset_);
   3042 }
   3043 
   3044 bool BaselineCacheIRCompiler::emitCallDOMFunctionWithAllocSite(
   3045    ObjOperandId calleeId, Int32OperandId argcId, ObjOperandId thisObjId,
   3046    CallFlags flags, uint32_t argcFixed, uint32_t siteOffset,
   3047    uint32_t targetOffset) {
   3048  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3049  loadAllocSiteIntoContext(siteOffset);
   3050  Maybe<bool> ignoresReturnValue;
   3051  Maybe<uint32_t> targetOffset_ = mozilla::Some(targetOffset);
   3052  return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
   3053                              argcFixed, ignoresReturnValue, targetOffset_,
   3054                              ClearLocalAllocSite::Yes);
   3055 }
   3056 #else
   3057 bool BaselineCacheIRCompiler::emitCallNativeFunction(ObjOperandId calleeId,
   3058                                                     Int32OperandId argcId,
   3059                                                     CallFlags flags,
   3060                                                     uint32_t argcFixed,
   3061                                                     bool ignoresReturnValue) {
   3062  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3063  Maybe<bool> ignoresReturnValue_ = mozilla::Some(ignoresReturnValue);
   3064  Maybe<uint32_t> targetOffset;
   3065  return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
   3066                              argcFixed, ignoresReturnValue_, targetOffset);
   3067 }
   3068 
   3069 bool BaselineCacheIRCompiler::emitCallDOMFunction(ObjOperandId calleeId,
   3070                                                  Int32OperandId argcId,
   3071                                                  ObjOperandId thisObjId,
   3072                                                  CallFlags flags,
   3073                                                  uint32_t argcFixed) {
   3074  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3075  Maybe<bool> ignoresReturnValue = mozilla::Some(false);
   3076  Maybe<uint32_t> targetOffset;
   3077  return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
   3078                              argcFixed, ignoresReturnValue, targetOffset);
   3079 }
   3080 
   3081 bool BaselineCacheIRCompiler::emitCallDOMFunctionWithAllocSite(
   3082    ObjOperandId calleeId, Int32OperandId argcId, ObjOperandId thisObjId,
   3083    CallFlags flags, uint32_t argcFixed, uint32_t siteOffset) {
   3084  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3085  loadAllocSiteIntoContext(siteOffset);
   3086  Maybe<bool> ignoresReturnValue = mozilla::Some(false);
   3087  Maybe<uint32_t> targetOffset;
   3088  return emitCallNativeShared(NativeCallType::Native, calleeId, argcId, flags,
   3089                              argcFixed, ignoresReturnValue, targetOffset,
   3090                              ClearLocalAllocSite::Yes);
   3091 }
   3092 #endif
   3093 
   3094 bool BaselineCacheIRCompiler::emitCallClassHook(ObjOperandId calleeId,
   3095                                                Int32OperandId argcId,
   3096                                                CallFlags flags,
   3097                                                uint32_t argcFixed,
   3098                                                uint32_t targetOffset) {
   3099  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3100  Maybe<bool> ignoresReturnValue;
   3101  Maybe<uint32_t> targetOffset_ = mozilla::Some(targetOffset);
   3102  return emitCallNativeShared(NativeCallType::ClassHook, calleeId, argcId,
   3103                              flags, argcFixed, ignoresReturnValue,
   3104                              targetOffset_);
   3105 }
   3106 
   3107 // Helper function for loading call arguments from the stack.  Loads
   3108 // and unboxes an object from a specific slot.
   3109 void BaselineCacheIRCompiler::loadStackObject(ArgumentKind kind,
   3110                                              CallFlags flags, Register argcReg,
   3111                                              Register dest) {
   3112  MOZ_ASSERT(enteredStubFrame_);
   3113 
   3114  bool addArgc = false;
   3115  int32_t slotIndex = GetIndexOfArgument(kind, flags, &addArgc);
   3116 
   3117  if (addArgc) {
   3118    int32_t slotOffset =
   3119        slotIndex * sizeof(JS::Value) + BaselineStubFrameLayout::Size();
   3120    BaseValueIndex slotAddr(FramePointer, argcReg, slotOffset);
   3121    masm.unboxObject(slotAddr, dest);
   3122  } else {
   3123    int32_t slotOffset =
   3124        slotIndex * sizeof(JS::Value) + BaselineStubFrameLayout::Size();
   3125    Address slotAddr(FramePointer, slotOffset);
   3126    masm.unboxObject(slotAddr, dest);
   3127  }
   3128 }
   3129 
   3130 template <typename T>
   3131 void BaselineCacheIRCompiler::storeThis(const T& newThis, Register argcReg,
   3132                                        CallFlags flags) {
   3133  switch (flags.getArgFormat()) {
   3134    case CallFlags::Standard: {
   3135      BaseValueIndex thisAddress(
   3136          FramePointer,
   3137          argcReg,                               // Arguments
   3138          1 * sizeof(Value) +                    // NewTarget
   3139              BaselineStubFrameLayout::Size());  // Stub frame
   3140      masm.storeValue(newThis, thisAddress);
   3141    } break;
   3142    case CallFlags::Spread: {
   3143      Address thisAddress(FramePointer,
   3144                          2 * sizeof(Value) +  // Arg array, NewTarget
   3145                              BaselineStubFrameLayout::Size());  // Stub frame
   3146      masm.storeValue(newThis, thisAddress);
   3147    } break;
   3148    default:
   3149      MOZ_CRASH("Invalid arg format for scripted constructor");
   3150  }
   3151 }
   3152 
   3153 /*
   3154 * Scripted constructors require a |this| object to be created prior to the
   3155 * call. When this function is called, the stack looks like (bottom->top):
   3156 *
   3157 * [..., Callee, ThisV, Arg0V, ..., ArgNV, NewTarget, StubFrameHeader]
   3158 *
   3159 * At this point, |ThisV| is JSWhyMagic::JS_IS_CONSTRUCTING.
   3160 *
   3161 * This function calls CreateThis to generate a new |this| object, then
   3162 * overwrites the magic ThisV on the stack.
   3163 */
void BaselineCacheIRCompiler::createThis(Register argcReg, Register calleeReg,
                                         Register scratch, CallFlags flags,
                                         bool isBoundFunction) {
  MOZ_ASSERT(flags.isConstructing());

  // If |this| must start out uninitialized, store the uninitialized-lexical
  // magic value instead of allocating an object.
  if (flags.needsUninitializedThis()) {
    storeThis(MagicValue(JS_UNINITIALIZED_LEXICAL), argcReg, flags);
    return;
  }

  // Save live registers that don't have to be traced.
  LiveGeneralRegisterSet liveNonGCRegs;
  liveNonGCRegs.add(argcReg);
  masm.PushRegsInMask(liveNonGCRegs);

  // CreateThis takes two arguments: callee, and newTarget.

  if (isBoundFunction) {
    // Push the bound function's target as callee and newTarget.
    Address boundTarget(calleeReg, BoundFunctionObject::offsetOfTargetSlot());
    masm.unboxObject(boundTarget, scratch);
    masm.push(scratch);
    masm.push(scratch);
  } else {
    // Push newTarget:
    loadStackObject(ArgumentKind::NewTarget, flags, argcReg, scratch);
    masm.push(scratch);

    // Push callee:
    loadStackObject(ArgumentKind::Callee, flags, argcReg, scratch);
    masm.push(scratch);
  }

  // Call CreateThisFromIC.
  using Fn =
      bool (*)(JSContext*, HandleObject, HandleObject, MutableHandleValue);
  callVM<Fn, CreateThisFromIC>(masm);

#ifdef DEBUG
  // CreateThisFromIC must return either an object or a magic value; anything
  // else indicates a bug.
  Label createdThisOK;
  masm.branchTestObject(Assembler::Equal, JSReturnOperand, &createdThisOK);
  masm.branchTestMagic(Assembler::Equal, JSReturnOperand, &createdThisOK);
  masm.assumeUnreachable(
      "The return of CreateThis must be an object or uninitialized.");
  masm.bind(&createdThisOK);
#endif

  // Restore saved registers.
  masm.PopRegsInMask(liveNonGCRegs);

  // Restore ICStubReg. The stub might have been moved if CreateThisFromIC
  // discarded JIT code.
  Address stubAddr(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP);
  masm.loadPtr(stubAddr, ICStubReg);

  // Save |this| value back into pushed arguments on stack.
  MOZ_ASSERT(!liveNonGCRegs.aliases(JSReturnOperand));
  storeThis(JSReturnOperand, argcReg, flags);

  // Restore calleeReg. CreateThisFromIC may trigger a GC, so we reload the
  // callee from the stub frame (which is traced) instead of spilling it to
  // the stack.
  loadStackObject(ArgumentKind::Callee, flags, argcReg, calleeReg);
}
   3228 
// After a constructing call returns: if the callee returned a non-object,
// replace the return value with the |this| object that was passed in (per
// the [[Construct]] semantics for ordinary constructors).
void BaselineCacheIRCompiler::updateReturnValue() {
  Label skipThisReplace;
  // A returned object is used as-is.
  masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);

  // If a constructor does not explicitly return an object, the return value
  // of the constructor is |this|. We load it out of the baseline stub frame.

  // At this point, the stack looks like this:
  //  newTarget
  //  ArgN
  //  ...
  //  Arg0
  //  ThisVal         <---- We want this value.
  //  Callee token          | Skip two stack slots.
  //  Frame descriptor      v
  //  [Top of stack]
  size_t thisvOffset =
      JitFrameLayout::offsetOfThis() - JitFrameLayout::bytesPoppedAfterCall();
  Address thisAddress(masm.getStackPointer(), thisvOffset);
  masm.loadValue(thisAddress, JSReturnOperand);

#ifdef DEBUG
  // The stored |this| must itself be an object at this point.
  masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
  masm.assumeUnreachable("Return of constructing call should be an object.");
#endif
  masm.bind(&skipThisReplace);
}
   3256 
// Shared emitter for calls to scripted functions, both plain calls
// (icScriptOffset is Nothing) and trial-inlined calls (icScriptOffset holds
// the stub-field offset of the callee's ICScript).
bool BaselineCacheIRCompiler::emitCallScriptedFunctionShared(
    ObjOperandId calleeId, Int32OperandId argcId, CallFlags flags,
    uint32_t argcFixed, Maybe<uint32_t> icScriptOffset) {
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Register calleeReg = allocator.useRegister(masm, calleeId);
  Register argcReg = allocator.useRegister(masm, argcId);

  bool isInlined = icScriptOffset.isSome();

  bool isConstructing = flags.isConstructing();
  bool isSameRealm = flags.isSameRealm();

  // Adjust argcReg for the call's argument format; bail out of compilation
  // if updateArgc cannot handle this format.
  if (!updateArgc(flags, argcReg, argcFixed, scratch)) {
    return false;
  }

  allocator.discardStack(masm);

  // Push a stub frame so that we can perform a non-tail call.
  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  if (!isSameRealm) {
    masm.switchToObjectRealm(calleeReg, scratch);
  }
  if (isInlined) {
    // Store the inlined ICScript in the stub frame for the callee.
    stubFrame.pushInlinedICScript(masm, stubAddress(*icScriptOffset));
  }

  // Constructing calls need |this| allocated before the call.
  if (isConstructing) {
    createThis(argcReg, calleeReg, scratch, flags,
               /* isBoundFunction = */ false);
  }

  pushArguments(argcReg, calleeReg, scratch, scratch2, flags, argcFixed,
                /*isJitCall =*/true);

  // Note that we use Push, not push, so that callJit will align the stack
  // properly on ARM.
  masm.PushCalleeToken(calleeReg, isConstructing);
  masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, argcReg, scratch,
                                     isInlined);

  // Load the start of the target JitCode.
  Register code = scratch2;
  if (isInlined) {
    // NOTE(review): inlined calls use the NoIon variant — presumably the
    // pushed ICScript must not be consumed by Ion code; confirm against
    // loadJitCodeRawNoIon's documentation.
    masm.loadJitCodeRawNoIon(calleeReg, code, scratch);
  } else {
    masm.loadJitCodeRaw(calleeReg, code);
  }

  masm.callJit(code);

  // If this is a constructing call, and the callee returns a non-object,
  // replace it with the |this| object passed in.
  if (isConstructing) {
    updateReturnValue();
  }

  stubFrame.leave(masm);

  if (!isSameRealm) {
    masm.switchToBaselineFrameRealm(scratch2);
  }

  return true;
}
   3327 
   3328 bool BaselineCacheIRCompiler::emitCallScriptedFunction(ObjOperandId calleeId,
   3329                                                       Int32OperandId argcId,
   3330                                                       CallFlags flags,
   3331                                                       uint32_t argcFixed) {
   3332  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3333  Maybe<uint32_t> icScriptOffset = mozilla::Nothing();
   3334  return emitCallScriptedFunctionShared(calleeId, argcId, flags, argcFixed,
   3335                                        icScriptOffset);
   3336 }
   3337 
   3338 bool BaselineCacheIRCompiler::emitCallInlinedFunction(ObjOperandId calleeId,
   3339                                                      Int32OperandId argcId,
   3340                                                      uint32_t icScriptOffset,
   3341                                                      CallFlags flags,
   3342                                                      uint32_t argcFixed) {
   3343  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3344  return emitCallScriptedFunctionShared(calleeId, argcId, flags, argcFixed,
   3345                                        mozilla::Some(icScriptOffset));
   3346 }
   3347 
bool BaselineCacheIRCompiler::emitCallWasmFunction(
    ObjOperandId calleeId, Int32OperandId argcId, CallFlags flags,
    uint32_t argcFixed, uint32_t funcExportOffset, uint32_t instanceOffset) {
  // Baseline has no specialized wasm call path: reuse the generic scripted
  // call. The wasm-specific stub fields (funcExportOffset, instanceOffset)
  // are deliberately unused here — presumably consumed by another compiler
  // tier; confirm against the Warp/Ion CacheIR transpiler.
  return emitCallScriptedFunction(calleeId, argcId, flags, argcFixed);
}
   3353 
   3354 #ifdef JS_PUNBOX64
// Emit a JIT call to a scripted proxy's |get| trap. The trap is called with
// (target, id, receiver) as arguments and the handler as |this|. The target
// and id are also stored in traced stub-frame slots so they survive the call
// (and a possible GC) and can be used to validate the proxy result.
// IdType is either ValOperandId (dynamic id) or uint32_t (stub-field offset
// of a known string id).
template <typename IdType>
bool BaselineCacheIRCompiler::emitCallScriptedProxyGetShared(
    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
    ObjOperandId trapId, IdType id, uint32_t nargsAndFlags) {
  Register handler = allocator.useRegister(masm, handlerId);
  ValueOperand target = allocator.useValueRegister(masm, targetId);
  Register receiver = allocator.useRegister(masm, receiverId);
  Register callee = allocator.useRegister(masm, trapId);
  ValueOperand idVal;
  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    idVal = allocator.useValueRegister(masm, id);
  }

  AutoScratchRegister code(allocator, masm);

  AutoScratchRegister scratch(allocator, masm);
  ValueOperand scratchVal(scratch);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // We need to keep the target around to potentially validate the proxy result
  stubFrame.storeTracedValue(masm, target);
  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    // Dynamic id: store it in traced slot 1.
    stubFrame.storeTracedValue(masm, idVal);
#  ifdef DEBUG
    // Private name symbols must not reach the proxy trap path.
    Label notPrivateSymbol;
    masm.branchTestSymbol(Assembler::NotEqual, idVal, &notPrivateSymbol);
    masm.unboxSymbol(idVal, scratch);
    masm.branch32(
        Assembler::NotEqual, Address(scratch, JS::Symbol::offsetOfCode()),
        Imm32(uint32_t(JS::SymbolCode::PrivateNameSymbol)), &notPrivateSymbol);
    masm.assumeUnreachable("Unexpected private field in callScriptedProxy");
    masm.bind(&notPrivateSymbol);
#  endif
  } else {
    // We need to either trace the id here or grab the ICStubReg back from
    // FramePointer + sizeof(void*) after the call in order to load it again.
    // We elect to do this because it unifies the code path after the call.
    Address idAddr(stubAddress(id));
    masm.loadPtr(idAddr, scratch);
    masm.tagValue(JSVAL_TYPE_STRING, scratch, scratchVal);
    stubFrame.storeTracedValue(masm, scratchVal);
  }

  // Align the stack for a call with max(3, nargs) arguments and pad any
  // formals beyond the trap's three actual arguments with undefined.
  uint16_t nargs = nargsAndFlags >> JSFunction::ArgCountShift;
  masm.alignJitStackBasedOnNArgs(std::max(uint16_t(3), nargs),
                                 /*countIncludesThis = */ false);
  for (size_t i = 3; i < nargs; i++) {
    masm.Push(UndefinedValue());
  }

  // Push the trap's arguments last-to-first: receiver, id, target; then the
  // handler as the |this| value.
  masm.tagValue(JSVAL_TYPE_OBJECT, receiver, scratchVal);
  masm.Push(scratchVal);

  if constexpr (std::is_same_v<IdType, ValOperandId>) {
    masm.Push(idVal);
  } else {
    stubFrame.loadTracedValue(masm, 1, scratchVal);
    masm.Push(scratchVal);
  }

  masm.Push(target);

  masm.tagValue(JSVAL_TYPE_OBJECT, handler, scratchVal);
  masm.Push(scratchVal);

  masm.loadJitCodeRaw(callee, code);

  masm.Push(callee);
  masm.Push(FrameDescriptor(FrameType::BaselineStub, 3));

  masm.callJit(code);

  // |code| is no longer needed after the call; reuse it as a scratch.
  Register scratch2 = code;

  // Reload the target from traced slot 0. If it requires proxy result
  // validation, call into the VM with (target, id, trap result) to check
  // the returned value.
  Label success;
  stubFrame.loadTracedValue(masm, 0, scratchVal);
  masm.unboxObject(scratchVal, scratch);
  masm.branchTestObjectNeedsProxyResultValidation(Assembler::Zero, scratch,
                                                  scratch2, &success);
  ValueOperand scratchVal2(scratch2);
  stubFrame.loadTracedValue(masm, 1, scratchVal2);
  masm.Push(JSReturnOperand);
  masm.Push(scratchVal2);
  masm.Push(scratch);
  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue,
                      MutableHandleValue);
  callVM<Fn, CheckProxyGetByValueResult>(masm);

  masm.bind(&success);

  stubFrame.leave(masm);

  return true;
}
   3453 
   3454 bool BaselineCacheIRCompiler::emitCallScriptedProxyGetResult(
   3455    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
   3456    ObjOperandId trapId, uint32_t idOffset, uint32_t nargsAndFlags) {
   3457  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3458 
   3459  return emitCallScriptedProxyGetShared(targetId, receiverId, handlerId, trapId,
   3460                                        idOffset, nargsAndFlags);
   3461 }
   3462 
   3463 bool BaselineCacheIRCompiler::emitCallScriptedProxyGetByValueResult(
   3464    ValOperandId targetId, ObjOperandId receiverId, ObjOperandId handlerId,
   3465    ValOperandId idId, ObjOperandId trapId, uint32_t nargsAndFlags) {
   3466  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3467 
   3468  return emitCallScriptedProxyGetShared(targetId, receiverId, handlerId, trapId,
   3469                                        idId, nargsAndFlags);
   3470 }
   3471 #endif
   3472 
// Call the scripted target of a BoundFunctionObject. The bound function's
// bound arguments are pushed in front of the call's own arguments and argc
// is bumped by numBoundArgs before the JIT call.
bool BaselineCacheIRCompiler::emitCallBoundScriptedFunction(
    ObjOperandId calleeId, ObjOperandId targetId, Int32OperandId argcId,
    CallFlags flags, uint32_t numBoundArgs) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  Register calleeReg = allocator.useRegister(masm, calleeId);
  Register argcReg = allocator.useRegister(masm, argcId);

  bool isConstructing = flags.isConstructing();
  bool isSameRealm = flags.isSameRealm();

  allocator.discardStack(masm);

  // Push a stub frame so that we can perform a non-tail call.
  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  Address boundTarget(calleeReg, BoundFunctionObject::offsetOfTargetSlot());

  // If we're constructing, switch to the target's realm and create |this|. If
  // we're not constructing, we switch to the target's realm after pushing the
  // arguments and loading the target.
  if (isConstructing) {
    if (!isSameRealm) {
      masm.unboxObject(boundTarget, scratch);
      masm.switchToObjectRealm(scratch, scratch);
    }
    createThis(argcReg, calleeReg, scratch, flags,
               /* isBoundFunction = */ true);
  }

  // Push all arguments, including |this|.
  pushBoundFunctionArguments(argcReg, calleeReg, scratch, scratch2, flags,
                             numBoundArgs, /* isJitCall = */ true);

  // Load the target JSFunction. Note: this clobbers calleeReg, so the bound
  // function itself is no longer available after this point.
  masm.unboxObject(boundTarget, calleeReg);

  if (!isConstructing && !isSameRealm) {
    masm.switchToObjectRealm(calleeReg, scratch);
  }

  // Update argc to include the bound arguments.
  masm.add32(Imm32(numBoundArgs), argcReg);

  // Load the start of the target JitCode.
  Register code = scratch2;
  masm.loadJitCodeRaw(calleeReg, code);

  // Note that we use Push, not push, so that callJit will align the stack
  // properly on ARM.
  masm.PushCalleeToken(calleeReg, isConstructing);
  masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, argcReg, scratch);

  masm.callJit(code);

  // Constructing calls must return an object; otherwise substitute |this|.
  if (isConstructing) {
    updateReturnValue();
  }

  stubFrame.leave(masm);

  if (!isSameRealm) {
    masm.switchToBaselineFrameRealm(scratch2);
  }

  return true;
}
   3545 
// Allocate a new ArrayObject with inline element storage. Fast path: inline
// GC allocation via createArrayWithFixedElements; slow path: VM call to
// NewArrayObjectBaselineFallback (nursery full or new arena needed).
bool BaselineCacheIRCompiler::emitNewArrayObjectResult(uint32_t arrayLength,
                                                       uint32_t shapeOffset,
                                                       uint32_t siteOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Pick an alloc kind large enough for |arrayLength| inline elements.
  gc::AllocKind allocKind = GuessArrayGCKind(arrayLength);
  MOZ_ASSERT(gc::GetObjectFinalizeKind(&ArrayObject::class_) ==
             gc::FinalizeKind::None);
  MOZ_ASSERT(!IsFinalizedKind(allocKind));

  // Inline element capacity = slots of the alloc kind minus the
  // ObjectElements header.
  uint32_t slotCount = GetGCKindSlots(allocKind);
  MOZ_ASSERT(slotCount >= ObjectElements::VALUES_PER_HEADER);
  uint32_t arrayCapacity = slotCount - ObjectElements::VALUES_PER_HEADER;

  AutoOutputRegister output(*this);
  AutoScratchRegister result(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister site(allocator, masm);
  AutoScratchRegisterMaybeOutput shape(allocator, masm, output);

  // Load the shape and allocation site from the stub data.
  Address shapeAddr(stubAddress(shapeOffset));
  masm.loadPtr(shapeAddr, shape);

  Address siteAddr(stubAddress(siteOffset));
  masm.loadPtr(siteAddr, site);

  allocator.discardStack(masm);

  Label done;
  Label fail;

  masm.createArrayWithFixedElements(
      result, shape, scratch, InvalidReg, arrayLength, arrayCapacity, 0, 0,
      allocKind, gc::Heap::Default, &fail, AllocSiteInput(site));
  masm.jump(&done);

  {
    masm.bind(&fail);

    // We get here if the nursery is full (unlikely) but also for tenured
    // allocations if the current arena is full and we need to allocate a new
    // one (fairly common).

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(site);
    masm.Push(Imm32(int32_t(allocKind)));
    masm.Push(Imm32(arrayLength));

    using Fn =
        ArrayObject* (*)(JSContext*, uint32_t, gc::AllocKind, gc::AllocSite*);
    callVM<Fn, NewArrayObjectBaselineFallback>(masm);

    stubFrame.leave(masm);
    masm.storeCallPointerResult(result);
  }

  masm.bind(&done);
  // Box the object pointer into the output Value register.
  masm.tagValue(JSVAL_TYPE_OBJECT, result, output.valueReg());
  return true;
}
   3608 
// Allocate a new plain object with the given shape. Fast path: inline GC
// allocation via createPlainGCObject; slow path: VM call to
// NewPlainObjectBaselineFallback.
bool BaselineCacheIRCompiler::emitNewPlainObjectResult(uint32_t numFixedSlots,
                                                       uint32_t numDynamicSlots,
                                                       gc::AllocKind allocKind,
                                                       uint32_t shapeOffset,
                                                       uint32_t siteOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister obj(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister site(allocator, masm);
  AutoScratchRegisterMaybeOutput shape(allocator, masm, output);

  // Load the shape and allocation site from the stub data.
  Address shapeAddr(stubAddress(shapeOffset));
  masm.loadPtr(shapeAddr, shape);

  Address siteAddr(stubAddress(siteOffset));
  masm.loadPtr(siteAddr, site);

  allocator.discardStack(masm);

  Label done;
  Label fail;

  // |shape| is also passed as the second temp register, so it may be
  // clobbered here; it is reloaded from the stub below before the VM call.
  masm.createPlainGCObject(obj, shape, scratch, shape, numFixedSlots,
                           numDynamicSlots, allocKind, gc::Heap::Default, &fail,
                           AllocSiteInput(site));
  masm.jump(&done);

  {
    masm.bind(&fail);

    // We get here if the nursery is full (unlikely) but also for tenured
    // allocations if the current arena is full and we need to allocate a new
    // one (fairly common).

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(site);
    masm.Push(Imm32(int32_t(allocKind)));
    masm.loadPtr(shapeAddr, shape);  // This might have been overwritten.
    masm.Push(shape);

    using Fn = JSObject* (*)(JSContext*, Handle<SharedShape*>, gc::AllocKind,
                             gc::AllocSite*);
    callVM<Fn, NewPlainObjectBaselineFallback>(masm);

    stubFrame.leave(masm);
    masm.storeCallPointerResult(obj);
  }

  masm.bind(&done);
  // Box the object pointer into the output Value register.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  return true;
}
   3665 
// Clone a canonical function for the current environment chain (lambda
// creation). Fast path: inline allocation via createFunctionClone; slow
// path: VM call to LambdaBaselineFallback.
bool BaselineCacheIRCompiler::emitNewFunctionCloneResult(
    uint32_t canonicalOffset, gc::AllocKind allocKind, uint32_t siteOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput result(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType site(allocator, masm, output);
  AutoScratchRegister canonical(allocator, masm);
  AutoScratchRegister envChain(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  // result and site both derive from the output register; they must not
  // alias each other.
  MOZ_ASSERT(result.get() != site.get());

  // Load the canonical function and the frame's environment chain.
  masm.loadPtr(stubAddress(canonicalOffset), canonical);
  Address envAddr(baselineFrameReg_,
                  BaselineFrame::reverseOffsetOfEnvironmentChain());
  masm.loadPtr(envAddr, envChain);

  Address siteAddr(stubAddress(siteOffset));
  masm.loadPtr(siteAddr, site);

  allocator.discardStack(masm);

  // Try to allocate a new function object in JIT code.
  Label done, fail;

  masm.createFunctionClone(result, canonical, envChain, scratch, allocKind,
                           &fail, AllocSiteInput(site));
  masm.jump(&done);

  {
    masm.bind(&fail);

    // Inline allocation failed: call into the VM.
    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(site);
    masm.Push(envChain);
    masm.Push(canonical);

    using Fn =
        JSObject* (*)(JSContext*, HandleFunction, HandleObject, gc::AllocSite*);
    callVM<Fn, js::LambdaBaselineFallback>(masm);

    stubFrame.leave(masm);
    masm.storeCallPointerResult(result);
  }

  masm.bind(&done);
  // Box the function object pointer into the output Value register.
  masm.tagValue(JSVAL_TYPE_OBJECT, result, output.valueReg());
  return true;
}
   3718 
// Call a scripted |return| method on an iterator to close it, then verify
// the method returned an object (throwing via the VM otherwise).
bool BaselineCacheIRCompiler::emitCloseIterScriptedResult(
    ObjOperandId iterId, ObjOperandId calleeId, uint32_t calleeNargs) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register iter = allocator.useRegister(masm, iterId);
  Register callee = allocator.useRegister(masm, calleeId);

  AutoScratchRegister code(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  masm.loadJitCodeRaw(callee, code);

  allocator.discardStack(masm);

  AutoStubFrame stubFrame(*this);
  stubFrame.enter(masm, scratch);

  // Call the return method. It is invoked with zero actual arguments;
  // all of the callee's formals are padded with undefined, and the
  // iterator object is passed as |this|.
  masm.alignJitStackBasedOnNArgs(calleeNargs, /*countIncludesThis = */ false);
  for (uint32_t i = 0; i < calleeNargs; i++) {
    masm.pushValue(UndefinedValue());
  }
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(iter)));
  masm.Push(callee);
  masm.Push(FrameDescriptor(FrameType::BaselineStub, /* argc = */ 0));

  masm.callJit(code);

  // Verify that the return value is an object.
  Label success;
  masm.branchTestObject(Assembler::Equal, JSReturnOperand, &success);

  // Non-object result: throw a TypeError through the VM.
  masm.Push(Imm32(int32_t(CheckIsObjectKind::IteratorReturn)));
  using Fn = bool (*)(JSContext*, CheckIsObjectKind);
  callVM<Fn, ThrowCheckIsObject>(masm);

  masm.bind(&success);

  stubFrame.leave(masm);
  return true;
}
   3759 
// Call the JitZone's shared RegExp stub stored at |jitZoneStubOffset|.
// |temp| is clobbered. Jumps to |vmCall| if the stub pointer is null.
static void CallRegExpStub(MacroAssembler& masm, size_t jitZoneStubOffset,
                           Register temp, Label* vmCall) {
  // Call jitZone()->regExpStub. We store a pointer to the RegExp
  // stub in the IC stub to keep it alive, but we shouldn't use it if the stub
  // has been discarded in the meantime (because we might have changed GC string
  // pretenuring heuristics that affect behavior of the stub). This is uncommon
  // but can happen if we discarded all JIT code but had some active (Baseline)
  // scripts on the stack.
  masm.movePtr(ImmPtr(masm.realm()->zone()->jitZone()), temp);
  masm.loadPtr(Address(temp, jitZoneStubOffset), temp);
  // A null stub pointer means the stub was discarded: fall back to the VM.
  masm.branchTestPtr(Assembler::Zero, temp, temp, vmCall);
  masm.call(Address(temp, JitCode::offsetOfCode()));
}
   3773 
   3774 // Used to move inputs to the registers expected by the RegExp stub.
// Used to move inputs to the registers expected by the RegExp stub.
// Moves are queued in the MoveResolver and emitted as one parallel move, so
// overlapping source/destination registers cannot clobber each other. Each
// |*Src| pointer is updated to the destination register once the move is
// scheduled. |lastIndexSrc| may be null when the stub takes no lastIndex.
static void SetRegExpStubInputRegisters(MacroAssembler& masm,
                                        Register* regexpSrc,
                                        Register regexpDest, Register* inputSrc,
                                        Register inputDest,
                                        Register* lastIndexSrc,
                                        Register lastIndexDest) {
  MoveResolver& moves = masm.moveResolver();
  if (*regexpSrc != regexpDest) {
    masm.propagateOOM(moves.addMove(MoveOperand(*regexpSrc),
                                    MoveOperand(regexpDest), MoveOp::GENERAL));
    *regexpSrc = regexpDest;
  }
  if (*inputSrc != inputDest) {
    masm.propagateOOM(moves.addMove(MoveOperand(*inputSrc),
                                    MoveOperand(inputDest), MoveOp::GENERAL));
    *inputSrc = inputDest;
  }
  if (lastIndexSrc && *lastIndexSrc != lastIndexDest) {
    masm.propagateOOM(moves.addMove(MoveOperand(*lastIndexSrc),
                                    MoveOperand(lastIndexDest), MoveOp::INT32));
    *lastIndexSrc = lastIndexDest;
  }

  masm.propagateOOM(moves.resolve());

  // Emit all scheduled moves at once.
  MoveEmitter emitter(masm);
  emitter.emit(moves);
  emitter.finish();
}
   3804 
// Emit the RegExp matcher operation: try the JitZone's shared matcher stub
// first, and fall back to the RegExpMatcherRaw VM function if the stub is
// missing or if it ran but signalled failure by returning undefined.
bool BaselineCacheIRCompiler::emitCallRegExpMatcherResult(
   ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
   uint32_t stubOffset) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 AutoOutputRegister output(*this);
 Register regexp = allocator.useRegister(masm, regexpId);
 Register input = allocator.useRegister(masm, inputId);
 Register lastIndex = allocator.useRegister(masm, lastIndexId);
 // Reuse the output Value's scratch register; the actual result is produced
 // in JSReturnOperand (see the static_assert below).
 Register scratch = output.valueReg().scratchReg();

 allocator.discardStack(masm);

 AutoStubFrame stubFrame(*this);
 stubFrame.enter(masm, scratch);

 // Move the operands into the fixed registers the shared matcher stub
 // expects; the Register locals are updated to the new locations.
 SetRegExpStubInputRegisters(masm, &regexp, RegExpMatcherRegExpReg, &input,
                             RegExpMatcherStringReg, &lastIndex,
                             RegExpMatcherLastIndexReg);

 // Stack space used by the stub; presumably holds the InputOutputData
 // followed by the MatchPairs (see the InputOutputDataSize offset below).
 masm.reserveStack(RegExpReservedStack);

 Label done, vmCall, vmCallNoMatches;
 // Jumps to vmCallNoMatches if the JitZone has no compiled matcher stub.
 CallRegExpStub(masm, JitZone::offsetOfRegExpMatcherStub(), scratch,
                &vmCallNoMatches);
 // The stub returns undefined to request the VM fallback.
 masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, &vmCall);

 masm.jump(&done);

 {
   Label pushedMatches;
   // No stub available: pass a null MatchPairs* to the VM function.
   masm.bind(&vmCallNoMatches);
   masm.push(ImmWord(0));
   masm.jump(&pushedMatches);

   // Stub requested fallback: pass the address of the MatchPairs inside the
   // reserved stack area (InputOutputDataSize bytes above the stack pointer).
   masm.bind(&vmCall);
   masm.computeEffectiveAddress(
       Address(masm.getStackPointer(), InputOutputDataSize), scratch);
   masm.Push(scratch);

   masm.bind(&pushedMatches);
   masm.Push(lastIndex);
   masm.Push(input);
   masm.Push(regexp);

   using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
                       int32_t lastIndex, MatchPairs* pairs,
                       MutableHandleValue output);
   callVM<Fn, RegExpMatcherRaw>(masm);
 }

 masm.bind(&done);

 // Both paths leave the result Value in JSReturnOperand, which is the
 // Baseline output register R0, so no extra move is needed.
 static_assert(R0 == JSReturnOperand);

 stubFrame.leave(masm);
 return true;
}
   3863 
// Emit the RegExp searcher operation: try the JitZone's shared searcher stub
// first, and fall back to the RegExpSearcherRaw VM function if the stub is
// missing or if it returned the RegExpSearcherResultFailed sentinel. The
// result is an int32 (in ReturnReg) that is boxed into the output Value.
bool BaselineCacheIRCompiler::emitCallRegExpSearcherResult(
   ObjOperandId regexpId, StringOperandId inputId, Int32OperandId lastIndexId,
   uint32_t stubOffset) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 AutoOutputRegister output(*this);
 Register regexp = allocator.useRegister(masm, regexpId);
 Register input = allocator.useRegister(masm, inputId);
 Register lastIndex = allocator.useRegister(masm, lastIndexId);
 Register scratch = output.valueReg().scratchReg();

 allocator.discardStack(masm);

 AutoStubFrame stubFrame(*this);
 stubFrame.enter(masm, scratch);

 // Move the operands into the fixed registers the shared searcher stub
 // expects; the Register locals are updated to the new locations.
 SetRegExpStubInputRegisters(masm, &regexp, RegExpSearcherRegExpReg, &input,
                             RegExpSearcherStringReg, &lastIndex,
                             RegExpSearcherLastIndexReg);
 // Ensure `scratch` doesn't conflict with the stub's input registers.
 // ReturnReg also receives the stub's int32 result, tested below.
 scratch = ReturnReg;

 // Stack space used by the stub; presumably holds the InputOutputData
 // followed by the MatchPairs (see the InputOutputDataSize offset below).
 masm.reserveStack(RegExpReservedStack);

 Label done, vmCall, vmCallNoMatches;
 // Jumps to vmCallNoMatches if the JitZone has no compiled searcher stub.
 CallRegExpStub(masm, JitZone::offsetOfRegExpSearcherStub(), scratch,
                &vmCallNoMatches);
 // The stub returns RegExpSearcherResultFailed to request the VM fallback.
 masm.branch32(Assembler::Equal, scratch, Imm32(RegExpSearcherResultFailed),
               &vmCall);

 masm.jump(&done);

 {
   Label pushedMatches;
   // No stub available: pass a null MatchPairs* to the VM function.
   masm.bind(&vmCallNoMatches);
   masm.push(ImmWord(0));
   masm.jump(&pushedMatches);

   // Stub requested fallback: pass the address of the MatchPairs inside the
   // reserved stack area (InputOutputDataSize bytes above the stack pointer).
   masm.bind(&vmCall);
   masm.computeEffectiveAddress(
       Address(masm.getStackPointer(), InputOutputDataSize), scratch);
   masm.Push(scratch);

   masm.bind(&pushedMatches);
   masm.Push(lastIndex);
   masm.Push(input);
   masm.Push(regexp);

   using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
                       int32_t lastIndex, MatchPairs* pairs, int32_t* result);
   callVM<Fn, RegExpSearcherRaw>(masm);
 }

 masm.bind(&done);

 // Box the int32 result (in ReturnReg on both paths) into the output Value.
 masm.tagValue(JSVAL_TYPE_INT32, ReturnReg, output.valueReg());

 stubFrame.leave(masm);
 return true;
}
   3924 
// Emit the builtin RegExp.prototype.exec match operation: try the JitZone's
// shared exec-match stub first, and fall back to the
// RegExpBuiltinExecMatchFromJit VM function if the stub is missing or if it
// ran but signalled failure by returning undefined. Unlike the matcher op,
// there is no lastIndex operand.
bool BaselineCacheIRCompiler::emitRegExpBuiltinExecMatchResult(
   ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 AutoOutputRegister output(*this);
 Register regexp = allocator.useRegister(masm, regexpId);
 Register input = allocator.useRegister(masm, inputId);
 // Reuse the output Value's scratch register; the actual result is produced
 // in JSReturnOperand (see the static_assert below).
 Register scratch = output.valueReg().scratchReg();

 allocator.discardStack(masm);

 AutoStubFrame stubFrame(*this);
 stubFrame.enter(masm, scratch);

 // Move the operands into the stub's fixed registers; no lastIndex here.
 SetRegExpStubInputRegisters(masm, &regexp, RegExpMatcherRegExpReg, &input,
                             RegExpMatcherStringReg, nullptr, InvalidReg);

 // Stack space used by the stub; presumably holds the InputOutputData
 // followed by the MatchPairs (see the InputOutputDataSize offset below).
 masm.reserveStack(RegExpReservedStack);

 Label done, vmCall, vmCallNoMatches;
 // Jumps to vmCallNoMatches if the JitZone has no compiled exec-match stub.
 CallRegExpStub(masm, JitZone::offsetOfRegExpExecMatchStub(), scratch,
                &vmCallNoMatches);
 // The stub returns undefined to request the VM fallback.
 masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, &vmCall);

 masm.jump(&done);

 {
   Label pushedMatches;
   // No stub available: pass a null MatchPairs* to the VM function.
   masm.bind(&vmCallNoMatches);
   masm.push(ImmWord(0));
   masm.jump(&pushedMatches);

   // Stub requested fallback: pass the address of the MatchPairs inside the
   // reserved stack area (InputOutputDataSize bytes above the stack pointer).
   masm.bind(&vmCall);
   masm.computeEffectiveAddress(
       Address(masm.getStackPointer(), InputOutputDataSize), scratch);
   masm.Push(scratch);

   masm.bind(&pushedMatches);
   masm.Push(input);
   masm.Push(regexp);

   using Fn =
       bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
                MatchPairs* pairs, MutableHandleValue output);
   callVM<Fn, RegExpBuiltinExecMatchFromJit>(masm);
 }

 masm.bind(&done);

 // Both paths leave the result Value in JSReturnOperand, which is the
 // Baseline output register R0, so no extra move is needed.
 static_assert(R0 == JSReturnOperand);

 stubFrame.leave(masm);
 return true;
}
   3979 
// Emit the builtin RegExp.prototype.test operation: try the JitZone's shared
// exec-test stub first, and fall back to the RegExpBuiltinExecTestFromJit VM
// function if the stub is missing or if it returned the
// RegExpExecTestResultFailed sentinel. Both the missing-stub and stub-failure
// cases share a single fallback path because the VM function takes no
// MatchPairs argument. The boolean result is boxed into the output Value.
bool BaselineCacheIRCompiler::emitRegExpBuiltinExecTestResult(
   ObjOperandId regexpId, StringOperandId inputId, uint32_t stubOffset) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 AutoOutputRegister output(*this);
 Register regexp = allocator.useRegister(masm, regexpId);
 Register input = allocator.useRegister(masm, inputId);
 Register scratch = output.valueReg().scratchReg();

 allocator.discardStack(masm);

 AutoStubFrame stubFrame(*this);
 stubFrame.enter(masm, scratch);

 // Move the operands into the stub's fixed registers; no lastIndex here.
 SetRegExpStubInputRegisters(masm, &regexp, RegExpExecTestRegExpReg, &input,
                             RegExpExecTestStringReg, nullptr, InvalidReg);

 // Ensure `scratch` doesn't conflict with the stub's input registers.
 // ReturnReg also receives the stub's result, tested below.
 scratch = ReturnReg;

 // Stack space used by the stub's input/output data.
 masm.reserveStack(RegExpReservedStack);

 Label done, vmCall;
 // Jumps to vmCall if the JitZone has no compiled exec-test stub.
 CallRegExpStub(masm, JitZone::offsetOfRegExpExecTestStub(), scratch, &vmCall);
 // The stub returns RegExpExecTestResultFailed to request the VM fallback.
 masm.branch32(Assembler::Equal, scratch, Imm32(RegExpExecTestResultFailed),
               &vmCall);

 masm.jump(&done);

 {
   masm.bind(&vmCall);

   masm.Push(input);
   masm.Push(regexp);

   using Fn = bool (*)(JSContext*, Handle<RegExpObject*> regexp,
                       HandleString input, bool* result);
   callVM<Fn, RegExpBuiltinExecTestFromJit>(masm);
 }

 masm.bind(&done);

 // Box the boolean result (in ReturnReg on both paths) into the output Value.
 masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());

 stubFrame.leave(masm);
 return true;
}
   4027 
// Emit a check for whether a RegExp has capture groups: loads the parsed
// RegExpShared and tests its pair count inline, falling back to the
// RegExpHasCaptureGroups VM function when the shared data isn't available.
// The result is a boolean Value in the output register.
bool BaselineCacheIRCompiler::emitRegExpHasCaptureGroupsResult(
   ObjOperandId regexpId, StringOperandId inputId) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 AutoOutputRegister output(*this);
 Register regexp = allocator.useRegister(masm, regexpId);
 Register input = allocator.useRegister(masm, inputId);
 AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

 allocator.discardStack(masm);

 // Load RegExpShared in |scratch|.
 Label vmCall;
 masm.loadParsedRegExpShared(regexp, scratch, &vmCall);

 // Return true iff pairCount > 1 (pair 0 is presumably the whole-match pair,
 // so any additional pair indicates a capture group — TODO confirm).
 Label returnTrue, done;
 masm.branch32(Assembler::Above,
               Address(scratch, RegExpShared::offsetOfPairCount()), Imm32(1),
               &returnTrue);
 masm.moveValue(BooleanValue(false), output.valueReg());
 masm.jump(&done);

 masm.bind(&returnTrue);
 masm.moveValue(BooleanValue(true), output.valueReg());
 masm.jump(&done);

 {
   // Fallback: no parsed RegExpShared, ask the VM.
   masm.bind(&vmCall);

   AutoStubFrame stubFrame(*this);
   stubFrame.enter(masm, scratch);

   masm.Push(input);
   masm.Push(regexp);

   using Fn =
       bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
   callVM<Fn, RegExpHasCaptureGroups>(masm);

   stubFrame.leave(masm);
   // Box the VM call's boolean result into the output Value.
   masm.storeCallBoolResult(scratch);
   masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
 }

 masm.bind(&done);
 return true;
}