tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

CacheIRCompiler.cpp (400671B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/CacheIRCompiler.h"
      8 
      9 #include "mozilla/ArrayUtils.h"
     10 #include "mozilla/FunctionTypeTraits.h"
     11 #include "mozilla/MaybeOneOf.h"
     12 #include "mozilla/ScopeExit.h"
     13 
     14 #include <type_traits>
     15 #include <utility>
     16 
     17 #include "jslibmath.h"
     18 #include "jsmath.h"
     19 
     20 #include "builtin/DataViewObject.h"
     21 #include "builtin/Object.h"
     22 #include "gc/GCEnum.h"
     23 #include "jit/BaselineCacheIRCompiler.h"
     24 #include "jit/CacheIRGenerator.h"
     25 #include "jit/IonCacheIRCompiler.h"
     26 #include "jit/JitFrames.h"
     27 #include "jit/JitRuntime.h"
     28 #include "jit/JitZone.h"
     29 #include "jit/SharedICHelpers.h"
     30 #include "jit/SharedICRegisters.h"
     31 #include "jit/VMFunctions.h"
     32 #include "js/friend/DOMProxy.h"     // JS::ExpandoAndGeneration
     33 #include "js/friend/XrayJitInfo.h"  // js::jit::GetXrayJitInfo
     34 #include "js/ScalarType.h"          // js::Scalar::Type
     35 #include "js/SweepingAPI.h"
     36 #include "proxy/DOMProxy.h"
     37 #include "proxy/Proxy.h"
     38 #include "proxy/ScriptedProxyHandler.h"
     39 #include "util/DifferentialTesting.h"
     40 #include "vm/ArgumentsObject.h"
     41 #include "vm/ArrayBufferObject.h"
     42 #include "vm/ArrayBufferViewObject.h"
     43 #include "vm/BigIntType.h"
     44 #include "vm/FunctionFlags.h"  // js::FunctionFlags
     45 #include "vm/GeneratorObject.h"
     46 #include "vm/GetterSetter.h"
     47 #include "vm/Interpreter.h"
     48 #include "vm/ObjectFuse.h"
     49 #include "vm/TypedArrayObject.h"
     50 #include "vm/TypeofEqOperand.h"  // TypeofEqOperand
     51 #include "vm/Uint8Clamped.h"
     52 
     53 #include "builtin/Boolean-inl.h"
     54 #include "jit/MacroAssembler-inl.h"
     55 #include "jit/SharedICHelpers-inl.h"
     56 #include "jit/VMFunctionList-inl.h"
     57 
     58 using namespace js;
     59 using namespace js::jit;
     60 
     61 using mozilla::Maybe;
     62 
     63 using JS::ExpandoAndGeneration;
     64 
// Ensure operand |op| is boxed in a ValueOperand register and return that
// register, reserved for the current instruction. The operand's recorded
// location is updated so later uses find it in the register.
ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
                                                      ValOperandId op) {
  OperandLocation& loc = operandLocations_[op.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      // Already boxed in a register: just mark it in use.
      currentOpRegs_.add(loc.valueReg());
      return loc.valueReg();

    case OperandLocation::ValueStack: {
      // Boxed on the native stack: pop/load into a fresh register.
      ValueOperand reg = allocateValueRegister(masm);
      popValue(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      // Stored in a Baseline frame slot: load into a fresh register.
      ValueOperand reg = allocateValueRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Constant: {
      // Materialize the constant value into a register.
      ValueOperand reg = allocateValueRegister(masm);
      masm.moveValue(loc.constant(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadReg: {
      // Temporarily add the payload register to currentOpRegs_ so
      // allocateValueRegister will stay away from it.
      currentOpRegs_.add(loc.payloadReg());
      ValueOperand reg = allocateValueRegister(masm);
      masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
      // The payload has been boxed into |reg|; release the old register.
      currentOpRegs_.take(loc.payloadReg());
      availableRegs_.add(loc.payloadReg());
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      // Unboxed payload on the stack: load it, then box it with its type tag.
      ValueOperand reg = allocateValueRegister(masm);
      popPayload(masm, &loc, reg.scratchReg());
      masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::DoubleReg: {
      // Box the double into a Value register; boxDouble needs a scratch
      // float register.
      ValueOperand reg = allocateValueRegister(masm);
      {
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(loc.doubleReg(), reg, fpscratch);
      }
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}
    131 
// Load a value operand directly into a float register. Caller must have
// guarded isNumber on the provided val. The operand's recorded location is
// not changed (the method is const).
void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
                                                  NumberOperandId op,
                                                  FloatRegister dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[op.id()];

  // The boxed-value cases below fall through to the shared tail; the unboxed
  // cases return directly.
  Label failure, done;
  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      // Boxed in a register: convert to double in place.
      masm.ensureDouble(loc.valueReg(), dest, &failure);
      break;
    }

    case OperandLocation::ValueStack: {
      // Boxed on the native stack.
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::BaselineFrame: {
      // Boxed in a Baseline frame slot.
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::DoubleReg: {
      masm.moveDouble(loc.doubleReg(), dest);
      return;
    }

    case OperandLocation::Constant: {
      MOZ_ASSERT(loc.constant().isNumber(),
                 "Caller must ensure the operand is a number value");
      masm.loadConstantDouble(loc.constant().toNumber(), dest);
      return;
    }

    case OperandLocation::PayloadReg: {
      // Doubles can't be stored in payload registers, so this must be an int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      masm.convertInt32ToDouble(loc.payloadReg(), dest);
      return;
    }

    case OperandLocation::PayloadStack: {
      // Doubles can't be stored in payload registers, so this must be an int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.convertInt32ToDouble(addr, dest);
      return;
    }

    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
      return;
  }
  // Shared tail for the ensureDouble cases above. The failure path only
  // triggers if the caller's isNumber guard was missing, hence the
  // assumeUnreachable.
  masm.jump(&done);
  masm.bind(&failure);
  masm.assumeUnreachable(
      "Missing guard allowed non-number to hit ensureDoubleRegister");
  masm.bind(&done);
}
    205 
// Copy the unboxed payload of typed operand |typedId| into |dest| without
// changing the operand's recorded location (the method is const).
void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
                                                   TypedOperandId typedId,
                                                   Register dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[typedId.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      // Boxed in a register: unbox into |dest|.
      masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
      break;
    }
    case OperandLocation::ValueStack: {
      // Boxed on the native stack: load and unbox.
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::BaselineFrame: {
      // Boxed in a Baseline frame slot: load and unbox.
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::PayloadReg: {
      // Already unboxed in a register: plain move.
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      masm.mov(loc.payloadReg(), dest);
      return;
    }
    case OperandLocation::PayloadStack: {
      // Already unboxed on the stack: plain load.
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.loadPtr(addr, dest);
      return;
    }
    case OperandLocation::DoubleReg:
    case OperandLocation::Constant:
    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand location");
  }
}
    251 
// Copy the boxed value of operand |valId| into |dest| without changing the
// operand's recorded location. Unlike copyToScratchRegister this does not
// adjust for a spilled scratch float register, hence the assert below.
void CacheRegisterAllocator::copyToScratchValueRegister(
    MacroAssembler& masm, ValOperandId valId, ValueOperand dest) const {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  const OperandLocation& loc = operandLocations_[valId.id()];
  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      masm.moveValue(loc.valueReg(), dest);
      break;
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::Constant:
      masm.moveValue(loc.constant(), dest);
      break;
    case OperandLocation::PayloadReg:
      // Re-box the unboxed payload into |dest|.
      masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
      break;
    case OperandLocation::PayloadStack: {
      // Load the payload into dest's scratch register, then box it.
      Address addr = payloadAddress(masm, &loc);
      masm.loadPtr(addr, dest.scratchReg());
      masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
      break;
    }
    case OperandLocation::DoubleReg: {
      ScratchDoubleScope fpscratch(masm);
      masm.boxDouble(loc.doubleReg(), dest, fpscratch);
      break;
    }
    case OperandLocation::Uninitialized:
      MOZ_CRASH();
  }
}
    293 
// Ensure typed operand |typedId| is available unboxed in a general purpose
// register and return that register, reserved for the current instruction.
// The operand's recorded location becomes PayloadReg.
Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
                                             TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  switch (loc.kind()) {
    case OperandLocation::PayloadReg:
      currentOpRegs_.add(loc.payloadReg());
      return loc.payloadReg();

    case OperandLocation::ValueReg: {
      // It's possible the value is still boxed: as an optimization, we unbox
      // the first time we use a value as object.
      ValueOperand val = loc.valueReg();
      // Return the whole boxed register (a pair on 32-bit platforms) to the
      // free set, then re-take just the scratch register to hold the unboxed
      // payload; on 32-bit this frees the type register.
      availableRegs_.add(val);
      Register reg = val.scratchReg();
      availableRegs_.take(reg);
      masm.unboxNonDouble(val, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      currentOpRegs_.add(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      // Unboxed payload on the stack: pop/load into a fresh register.
      Register reg = allocateRegister(masm);
      popPayload(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::ValueStack: {
      // The value is on the stack, but boxed. If it's on top of the stack we
      // unbox it and then remove it from the stack, else we just unbox.
      Register reg = allocateRegister(masm);
      if (loc.valueStack() == stackPushed_) {
        masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
                            typedId.type());
        masm.addToStackPtr(Imm32(sizeof(js::Value)));
        MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
        stackPushed_ -= sizeof(js::Value);
      } else {
        MOZ_ASSERT(loc.valueStack() < stackPushed_);
        masm.unboxNonDouble(
            Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
            reg, typedId.type());
      }
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      // Load and unbox directly from the Baseline frame slot.
      Register reg = allocateRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.unboxNonDouble(addr, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    };

    case OperandLocation::Constant: {
      // Materialize the constant's payload: GC pointers for strings,
      // symbols, and BigInts, 0/1 words for booleans.
      Value v = loc.constant();
      Register reg = allocateRegister(masm);
      if (v.isString()) {
        masm.movePtr(ImmGCPtr(v.toString()), reg);
      } else if (v.isSymbol()) {
        masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
      } else if (v.isBigInt()) {
        masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
      } else if (v.isBoolean()) {
        masm.movePtr(ImmWord(v.toBoolean() ? 1 : 0), reg);
      } else {
        MOZ_CRASH("Unexpected Value");
      }
      loc.setPayloadReg(reg, v.extractNonDoubleType());
      return reg;
    }

    case OperandLocation::DoubleReg:
    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}
    377 
// Return operand |val| either as its constant value (avoiding any register
// use) or in a typed/value register, depending on its current location.
ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
    MacroAssembler& masm, ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  switch (loc.kind()) {
    case OperandLocation::Constant:
      return loc.constant();

    case OperandLocation::PayloadReg:
    case OperandLocation::PayloadStack: {
      // Already unboxed: hand out a typed register.
      JSValueType payloadType = loc.payloadType();
      Register reg = useRegister(masm, TypedOperandId(val, payloadType));
      return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
                                  AnyRegister(reg));
    }

    case OperandLocation::ValueReg:
    case OperandLocation::ValueStack:
    case OperandLocation::BaselineFrame:
      // Boxed: hand out a ValueOperand.
      return TypedOrValueRegister(useValueRegister(masm, val));

    case OperandLocation::DoubleReg:
      return TypedOrValueRegister(MIRType::Double,
                                  AnyRegister(loc.doubleReg()));

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}
    411 
    412 Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
    413                                                TypedOperandId typedId) {
    414  MOZ_ASSERT(!addedFailurePath_);
    415  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
    416 
    417  OperandLocation& loc = operandLocations_[typedId.id()];
    418  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
    419 
    420  Register reg = allocateRegister(masm);
    421  loc.setPayloadReg(reg, typedId.type());
    422  return reg;
    423 }
    424 
    425 ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
    426                                                         ValOperandId val) {
    427  MOZ_ASSERT(!addedFailurePath_);
    428  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
    429 
    430  OperandLocation& loc = operandLocations_[val.id()];
    431  MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
    432 
    433  ValueOperand reg = allocateValueRegister(masm);
    434  loc.setValueReg(reg);
    435  return reg;
    436 }
    437 
// Release the registers and stack slots of operands that are dead at the
// current instruction, making them available for reallocation.
void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
  // See if any operands are dead so we can reuse their registers. Note that
  // we skip the input operands, as those are also used by failure paths, and
  // we currently don't track those uses.
  for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
       i++) {
    if (!writer_.operandIsDead(i, currentInstruction_)) {
      continue;
    }

    OperandLocation& loc = operandLocations_[i];
    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        availableRegs_.add(loc.payloadReg());
        break;
      case OperandLocation::ValueReg:
        availableRegs_.add(loc.valueReg());
        break;
      case OperandLocation::PayloadStack:
        // Remember the stack slot so a later spill can reuse it.
        masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
        break;
      case OperandLocation::ValueStack:
        masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
        break;
      case OperandLocation::Uninitialized:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        // Nothing to reclaim for these locations.
        break;
    }
    loc.setUninitialized();
  }
}
    471 
    472 void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
    473  // This should only be called when we are no longer using the operands,
    474  // as we're discarding everything from the native stack. Set all operand
    475  // locations to Uninitialized to catch bugs.
    476  for (size_t i = 0; i < operandLocations_.length(); i++) {
    477    operandLocations_[i].setUninitialized();
    478  }
    479 
    480  if (stackPushed_ > 0) {
    481    masm.addToStackPtr(Imm32(stackPushed_));
    482    stackPushed_ = 0;
    483  }
    484  freePayloadSlots_.clear();
    485  freeValueSlots_.clear();
    486 }
    487 
// Allocate a general purpose register for the current instruction. Tries, in
// order: (1) an already-free register, possibly after releasing dead
// operands, (2) a register freed by spilling an operand not used by the
// current instruction, (3) a register from availableRegsAfterSpill_, pushed
// onto the stack and recorded in spilledRegs_.
Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  if (availableRegs_.empty()) {
    freeDeadOperandLocations(masm);
  }

  if (availableRegs_.empty()) {
    // Still no registers available, try to spill unused operands to
    // the stack.
    for (size_t i = 0; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.kind() == OperandLocation::PayloadReg) {
        Register reg = loc.payloadReg();
        if (currentOpRegs_.has(reg)) {
          // In use by the current instruction; can't spill.
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // We got a register, so break out of the loop.
      }
      if (loc.kind() == OperandLocation::ValueReg) {
        ValueOperand reg = loc.valueReg();
        if (currentOpRegs_.aliases(reg)) {
          continue;
        }

        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // Break out of the loop.
      }
    }
  }

  if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
    // Last resort: push a register that holds no operand and record it in
    // spilledRegs_ together with its stack position.
    Register reg = availableRegsAfterSpill_.takeAny();
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));

    availableRegs_.add(reg);
  }

  // At this point, there must be a free register.
  MOZ_RELEASE_ASSERT(!availableRegs_.empty());

  Register reg = availableRegs_.takeAny();
  currentOpRegs_.add(reg);
  return reg;
}
    541 
// Reserve the specific register |reg| for the current instruction, spilling
// whatever currently occupies it if necessary.
void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
                                                   Register reg) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  // Fixed registers should be allocated first, to ensure they're
  // still available.
  MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");

  freeDeadOperandLocations(masm);

  // Easy case: the register is already free.
  if (availableRegs_.has(reg)) {
    availableRegs_.take(reg);
    currentOpRegs_.add(reg);
    return;
  }

  // Register may be available only after spilling contents.
  if (availableRegsAfterSpill_.has(reg)) {
    availableRegsAfterSpill_.take(reg);
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
    currentOpRegs_.add(reg);
    return;
  }

  // The register must be used by some operand. Spill it to the stack.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.kind() == OperandLocation::PayloadReg) {
      if (loc.payloadReg() != reg) {
        continue;
      }

      spillOperandToStackOrRegister(masm, &loc);
      currentOpRegs_.add(reg);
      return;
    }
    if (loc.kind() == OperandLocation::ValueReg) {
      if (!loc.valueReg().aliases(reg)) {
        continue;
      }

      // Spilling the value frees the whole register pair; re-take just the
      // requested register so the other half stays available.
      ValueOperand valueReg = loc.valueReg();
      spillOperandToStackOrRegister(masm, &loc);

      availableRegs_.add(valueReg);
      availableRegs_.take(reg);
      currentOpRegs_.add(reg);
      return;
    }
  }

  MOZ_CRASH("Invalid register");
}
    599 
// Reserve the specific registers making up |reg|: on nunbox platforms a
// Value occupies two registers (payload + type), otherwise one.
void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
                                                        ValueOperand reg) {
#ifdef JS_NUNBOX32
  allocateFixedRegister(masm, reg.payloadReg());
  allocateFixedRegister(masm, reg.typeReg());
#else
  allocateFixedRegister(masm, reg.valueReg());
#endif
}
    609 
#ifdef JS_NUNBOX32
// Possible miscompilation in clang-12 (bug 1689641)
MOZ_NEVER_INLINE
#endif
// Allocate registers to hold a boxed Value: two general purpose registers on
// nunbox platforms, one on boxed-pointer platforms.
ValueOperand CacheRegisterAllocator::allocateValueRegister(
    MacroAssembler& masm) {
#ifdef JS_NUNBOX32
  Register reg1 = allocateRegister(masm);
  Register reg2 = allocateRegister(masm);
  return ValueOperand(reg1, reg2);
#else
  Register reg = allocateRegister(masm);
  return ValueOperand(reg);
#endif
}
    625 
    626 bool CacheRegisterAllocator::init() {
    627  if (!origInputLocations_.resize(writer_.numInputOperands())) {
    628    return false;
    629  }
    630  if (!operandLocations_.resize(writer_.numOperandIds())) {
    631    return false;
    632  }
    633  return true;
    634 }
    635 
// Compute the set of registers that can only be used after pushing their
// current contents onto the stack.
void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
  // Registers not in availableRegs_ and not used by input operands are
  // available after being spilled.
  availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
      GeneralRegisterSet::Not(availableRegs_.set()),
      GeneralRegisterSet::Not(inputRegisterSet()));
}
    643 
void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
  // If IC inputs alias each other, make sure they are stored in different
  // locations so we don't have to deal with this complexity in the rest of
  // the allocator.
  //
  // Note that this can happen in IonMonkey with something like |o.foo = o|
  // or |o[i] = i|.

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(origInputLocations_.length() == numInputs);

  // Compare each register-resident input against all earlier inputs.
  for (size_t i = 1; i < numInputs; i++) {
    OperandLocation& loc1 = operandLocations_[i];
    if (!loc1.isInRegister()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      OperandLocation& loc2 = operandLocations_[j];
      if (!loc1.aliasesReg(loc2)) {
        continue;
      }

      // loc1 and loc2 alias so we spill one of them. If one is a
      // ValueReg and the other is a PayloadReg, we have to spill the
      // PayloadReg: spilling the ValueReg instead would leave its type
      // register unallocated on 32-bit platforms.
      if (loc1.kind() == OperandLocation::ValueReg) {
        // No break here: other earlier operands may still alias loc1.
        spillOperandToStack(masm, &loc2);
      } else {
        MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
        spillOperandToStack(masm, &loc1);
        break;  // Spilled loc1, so nothing else will alias it.
      }
    }
  }

#ifdef DEBUG
  assertValidState();
#endif
}
    685 
// Compute the set of general purpose registers occupied by the IC's input
// operands (payload registers and boxed-value registers).
GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
  MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());

  AllocatableGeneralRegisterSet result;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    const OperandLocation& loc = operandLocations_[i];
    // Inputs must not have been moved yet.
    MOZ_ASSERT(loc == origInputLocations_[i]);

    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        result.addUnchecked(loc.payloadReg());
        continue;
      case OperandLocation::ValueReg:
        result.addUnchecked(loc.valueReg());
        continue;
      case OperandLocation::PayloadStack:
      case OperandLocation::ValueStack:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        // These locations don't occupy general purpose registers.
        continue;
      case OperandLocation::Uninitialized:
        break;
    }
    MOZ_CRASH("Invalid kind");
  }

  return result.set();
}
    715 
    716 JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
    717  const OperandLocation& loc = operandLocations_[val.id()];
    718 
    719  switch (loc.kind()) {
    720    case OperandLocation::ValueReg:
    721    case OperandLocation::ValueStack:
    722    case OperandLocation::BaselineFrame:
    723      return JSVAL_TYPE_UNKNOWN;
    724 
    725    case OperandLocation::PayloadStack:
    726    case OperandLocation::PayloadReg:
    727      return loc.payloadType();
    728 
    729    case OperandLocation::Constant:
    730      return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
    731                                       : loc.constant().extractNonDoubleType();
    732 
    733    case OperandLocation::DoubleReg:
    734      return JSVAL_TYPE_DOUBLE;
    735 
    736    case OperandLocation::Uninitialized:
    737      break;
    738  }
    739 
    740  MOZ_CRASH("Invalid kind");
    741 }
    742 
    743 void CacheRegisterAllocator::initInputLocation(
    744    size_t i, const TypedOrValueRegister& reg) {
    745  if (reg.hasValue()) {
    746    initInputLocation(i, reg.valueReg());
    747  } else if (reg.typedReg().isFloat()) {
    748    MOZ_ASSERT(reg.type() == MIRType::Double);
    749    initInputLocation(i, reg.typedReg().fpu());
    750  } else {
    751    initInputLocation(i, reg.typedReg().gpr(),
    752                      ValueTypeFromMIRType(reg.type()));
    753  }
    754 }
    755 
    756 void CacheRegisterAllocator::initInputLocation(
    757    size_t i, const ConstantOrRegister& value) {
    758  if (value.constant()) {
    759    initInputLocation(i, value.value());
    760  } else {
    761    initInputLocation(i, value.reg());
    762  }
    763 }
    764 
// Move a register-resident operand onto the native stack, reusing a free
// stack slot when one is available. Stack positions are recorded as the
// value of stackPushed_ when the slot was created; the address is later
// recomputed as stackPushed_ - position.
void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
                                                 OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  if (loc->kind() == OperandLocation::ValueReg) {
    if (!freeValueSlots_.empty()) {
      // Reuse a previously freed Value-sized slot.
      uint32_t stackPos = freeValueSlots_.popCopy();
      MOZ_ASSERT(stackPos <= stackPushed_);
      masm.storeValue(loc->valueReg(),
                      Address(masm.getStackPointer(), stackPushed_ - stackPos));
      loc->setValueStack(stackPos);
      return;
    }
    // No free slot: push a new one.
    stackPushed_ += sizeof(js::Value);
    masm.pushValue(loc->valueReg());
    loc->setValueStack(stackPushed_);
    return;
  }

  MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);

  if (!freePayloadSlots_.empty()) {
    // Reuse a previously freed pointer-sized slot.
    uint32_t stackPos = freePayloadSlots_.popCopy();
    MOZ_ASSERT(stackPos <= stackPushed_);
    masm.storePtr(loc->payloadReg(),
                  Address(masm.getStackPointer(), stackPushed_ - stackPos));
    loc->setPayloadStack(stackPos, loc->payloadType());
    return;
  }
  stackPushed_ += sizeof(uintptr_t);
  masm.push(loc->payloadReg());
  loc->setPayloadStack(stackPushed_, loc->payloadType());
}
    798 
// Move a register-resident operand out of its current register(s): into
// other free registers if enough are available, otherwise onto the stack.
void CacheRegisterAllocator::spillOperandToStackOrRegister(
    MacroAssembler& masm, OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  // If enough registers are available, use them.
  if (loc->kind() == OperandLocation::ValueReg) {
    // A boxed Value needs one register per box piece (two on nunbox
    // platforms, one otherwise).
    static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
    if (availableRegs_.set().size() >= BoxPieces) {
      ValueOperand reg = availableRegs_.takeAnyValue();
      masm.moveValue(loc->valueReg(), reg);
      loc->setValueReg(reg);
      return;
    }
  } else {
    MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
    if (!availableRegs_.empty()) {
      Register reg = availableRegs_.takeAny();
      masm.movePtr(loc->payloadReg(), reg);
      loc->setPayloadReg(reg, loc->payloadType());
      return;
    }
  }

  // Not enough registers available, spill to the stack.
  spillOperandToStack(masm, loc);
}
    825 
// Load the payload currently stored on the stack at |loc| into |dest| and
// update |loc| to refer to that register.
void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
                                        OperandLocation* loc, Register dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

  // The payload is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->payloadStack() == stackPushed_) {
    masm.pop(dest);
    stackPushed_ -= sizeof(uintptr_t);
  } else {
    MOZ_ASSERT(loc->payloadStack() < stackPushed_);
    masm.loadPtr(payloadAddress(masm, loc), dest);
    // Remember the vacated slot so a later spill can reuse it instead of
    // growing the stack.
    masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
  }

  loc->setPayloadReg(dest, loc->payloadType());
}
    844 
    845 Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
    846                                             const OperandLocation* loc) const {
    847  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
    848  return Address(masm.getStackPointer(), stackPushed_ - loc->valueStack());
    849 }
    850 
    851 Address CacheRegisterAllocator::payloadAddress(
    852    MacroAssembler& masm, const OperandLocation* loc) const {
    853  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
    854  return Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack());
    855 }
    856 
// Load the boxed Value currently stored on the stack at |loc| into |dest|
// and update |loc| to refer to that value register.
void CacheRegisterAllocator::popValue(MacroAssembler& masm,
                                      OperandLocation* loc, ValueOperand dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));

  // The Value is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->valueStack() == stackPushed_) {
    masm.popValue(dest);
    stackPushed_ -= sizeof(js::Value);
  } else {
    MOZ_ASSERT(loc->valueStack() < stackPushed_);
    masm.loadValue(
        Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
        dest);
    // Remember the vacated slot so a later spill can reuse it instead of
    // growing the stack.
    masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
  }

  loc->setValueReg(dest);
}
    877 
    878 #ifdef DEBUG
    879 void CacheRegisterAllocator::assertValidState() const {
    880  // Assert different operands don't have aliasing storage. We depend on this
    881  // when spilling registers, for instance.
    882 
    883  if (!JitOptions.fullDebugChecks) {
    884    return;
    885  }
    886 
    887  for (size_t i = 0; i < operandLocations_.length(); i++) {
    888    const auto& loc1 = operandLocations_[i];
    889    if (loc1.isUninitialized()) {
    890      continue;
    891    }
    892 
    893    for (size_t j = 0; j < i; j++) {
    894      const auto& loc2 = operandLocations_[j];
    895      if (loc2.isUninitialized()) {
    896        continue;
    897      }
    898      MOZ_ASSERT(!loc1.aliasesReg(loc2));
    899    }
    900  }
    901 }
    902 #endif
    903 
    904 bool OperandLocation::aliasesReg(const OperandLocation& other) const {
    905  MOZ_ASSERT(&other != this);
    906 
    907  switch (other.kind_) {
    908    case PayloadReg:
    909      return aliasesReg(other.payloadReg());
    910    case ValueReg:
    911      return aliasesReg(other.valueReg());
    912    case PayloadStack:
    913    case ValueStack:
    914    case BaselineFrame:
    915    case Constant:
    916    case DoubleReg:
    917      return false;
    918    case Uninitialized:
    919      break;
    920  }
    921 
    922  MOZ_CRASH("Invalid kind");
    923 }
    924 
// Emit code to move every input operand back to the location it occupied on
// IC entry (origInputLocations_), then restore any spilled registers and
// optionally discard the IC's stack allocations. Used on paths that leave
// the IC (e.g. failure paths), where callers expect the original state.
void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
                                               bool shouldDiscardStack) {
  size_t numInputOperands = origInputLocations_.length();
  MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);

  for (size_t j = 0; j < numInputOperands; j++) {
    const OperandLocation& dest = origInputLocations_[j];
    OperandLocation& cur = operandLocations_[j];
    if (dest == cur) {
      continue;
    }

    // Whatever happens below, operand j's bookkeeping ends up at |dest|.
    auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });

    // We have a cycle if a destination register will be used later
    // as source register. If that happens, just push the current value
    // on the stack and later get it from there.
    for (size_t k = j + 1; k < numInputOperands; k++) {
      OperandLocation& laterSource = operandLocations_[k];
      if (dest.aliasesReg(laterSource)) {
        spillOperandToStack(masm, &laterSource);
      }
    }

    if (dest.kind() == OperandLocation::ValueReg) {
      // We have to restore a Value register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          masm.moveValue(cur.valueReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadReg:
          masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
          continue;
        case OperandLocation::PayloadStack: {
          // Pop through the destination's scratch register, then box.
          Register scratch = dest.valueReg().scratchReg();
          popPayload(masm, &cur, scratch);
          masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
          continue;
        }
        case OperandLocation::ValueStack:
          popValue(masm, &cur, dest.valueReg());
          continue;
        case OperandLocation::DoubleReg:
          masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::PayloadReg) {
      // We have to restore a payload register.
      switch (cur.kind()) {
        case OperandLocation::ValueReg:
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
                              dest.payloadType());
          continue;
        case OperandLocation::PayloadReg:
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          masm.mov(cur.payloadReg(), dest.payloadReg());
          continue;
        case OperandLocation::PayloadStack: {
          MOZ_ASSERT(cur.payloadType() == dest.payloadType());
          popPayload(masm, &cur, dest.payloadReg());
          continue;
        }
        case OperandLocation::ValueStack:
          // Unbox directly from the stack slot; the slot is not freed here.
          MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
          MOZ_ASSERT(cur.valueStack() <= stackPushed_);
          MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
          masm.unboxNonDouble(
              Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
              dest.payloadReg(), dest.payloadType());
          continue;
        case OperandLocation::Constant:
        case OperandLocation::BaselineFrame:
        case OperandLocation::DoubleReg:
        case OperandLocation::Uninitialized:
          break;
      }
    } else if (dest.kind() == OperandLocation::Constant ||
               dest.kind() == OperandLocation::BaselineFrame ||
               dest.kind() == OperandLocation::DoubleReg) {
      // Nothing to do.
      continue;
    }

    MOZ_CRASH("Invalid kind");
  }

  // Restore registers that were spilled to the stack, popping when the
  // spill slot is on top of the stack and loading otherwise.
  for (const SpilledRegister& spill : spilledRegs_) {
    MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

    if (spill.stackPushed == stackPushed_) {
      masm.pop(spill.reg);
      stackPushed_ -= sizeof(uintptr_t);
    } else {
      MOZ_ASSERT(spill.stackPushed < stackPushed_);
      masm.loadPtr(
          Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
          spill.reg);
    }
  }

  if (shouldDiscardStack) {
    discardStack(masm);
  }
}
   1034 
   1035 size_t CacheIRStubInfo::stubDataSize() const {
   1036  size_t field = 0;
   1037  size_t size = 0;
   1038  while (true) {
   1039    StubField::Type type = fieldType(field++);
   1040    if (type == StubField::Type::Limit) {
   1041      return size;
   1042    }
   1043    size += StubField::sizeInBytes(type);
   1044  }
   1045 }
   1046 
// Reinterpret |ptr| as a pointer to a GCPtr<T> so the field can be
// initialized in place.
template <typename T>
static GCPtr<T>* AsGCPtr(void* ptr) {
  return static_cast<GCPtr<T>*>(ptr);
}
   1051 
// Overwrite a raw word-sized field in a stub's data section with |newWord|.
// |oldWord| is the value the caller expects to be stored there; this is
// asserted before the write.
void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
                                         uintptr_t oldWord,
                                         uintptr_t newWord) const {
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
  uintptr_t* addr = reinterpret_cast<uintptr_t*>(stubData + offset);
  MOZ_ASSERT(*addr == oldWord);
  *addr = newWord;
}
   1060 
// Overwrite a raw 64-bit field in a stub's data section with |newBits|.
// |oldBits| is the value the caller expects to be stored there; this is
// asserted before the write.
void CacheIRStubInfo::replaceStubRawValueBits(uint8_t* stubData,
                                              uint32_t offset, uint64_t oldBits,
                                              uint64_t newBits) const {
  MOZ_ASSERT(uint64_t(stubData + offset) % sizeof(uint64_t) == 0);
  uint64_t* addr = reinterpret_cast<uint64_t*>(stubData + offset);
  MOZ_ASSERT(*addr == oldBits);
  *addr = newBits;
}
   1069 
// Return a reference to the stub field of the given |type| located |offset|
// bytes into |stub|'s data section (the data section itself starts
// stubDataOffset_ bytes into the stub).
template <class Stub, StubField::Type type>
typename MapStubFieldToType<type>::WrappedType& CacheIRStubInfo::getStubField(
    Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  return *reinterpret_cast<WrappedType*>(stubData + offset);
}
   1079 
// Explicit instantiations of getStubField for ICCacheIRStub, one per field
// type used by baseline IC stubs.
#define INSTANTIATE_GET_STUB_FIELD(Type)                                   \
  template typename MapStubFieldToType<Type>::WrappedType&                 \
  CacheIRStubInfo::getStubField<ICCacheIRStub, Type>(ICCacheIRStub * stub, \
                                                     uint32_t offset) const;
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Shape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakShape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::JSObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Symbol)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::String)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakBaseScript)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Value)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakValue)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Id)
#undef INSTANTIATE_GET_STUB_FIELD
   1095 
// Return the raw pointer value stored |offset| bytes into |stub|'s data
// section (the field holds a T*, not a wrapped GC pointer).
template <class Stub, class T>
T* CacheIRStubInfo::getPtrStubField(Stub* stub, uint32_t offset) const {
  uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
  MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);

  return *reinterpret_cast<T**>(stubData + offset);
}

template gc::AllocSite* CacheIRStubInfo::getPtrStubField(ICCacheIRStub* stub,
                                                         uint32_t offset) const;
   1106 
// Placement-new the wrapper type (e.g. GCPtr/WeakHeapPtr) for stub field
// |type| at |ptr|, reinterpreting the raw bits of |val| as the field's raw
// pointer type.
template <StubField::Type type, typename V>
static void InitWrappedPtr(void* ptr, V val) {
  using RawType = typename MapStubFieldToType<type>::RawType;
  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  auto* wrapped = static_cast<WrappedType*>(ptr);
  new (wrapped) WrappedType(mozilla::BitwiseCast<RawType>(val));
}
   1114 
// Placement-new the wrapper type for a Value-holding stub field at |ptr|,
// reconstructing the Value from its raw 64-bit representation.
template <StubField::Type type>
static void InitWrappedValuePtr(void* ptr, uint64_t val) {
  using WrappedType = typename MapStubFieldToType<type>::WrappedType;
  auto* wrapped = static_cast<WrappedType*>(ptr);
  new (wrapped) WrappedType(Value::fromRawBits(val));
}
   1121 
// Initialize a word-sized stub data field at |dest| from its raw bits,
// running the appropriate wrapper-type constructor for fields that hold GC
// things. |dest| must be word-aligned.
static void InitWordStubField(StubField::Type type, void* dest,
                              uintptr_t value) {
  MOZ_ASSERT(StubField::sizeIsWord(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uintptr_t)) == 0,
             "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
      // Plain data: store the bits directly.
      *static_cast<uintptr_t*>(dest) = value;
      break;
    case StubField::Type::Shape:
      InitWrappedPtr<StubField::Type::Shape>(dest, value);
      break;
    case StubField::Type::WeakShape:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakShape>(dest, value);
      break;
    case StubField::Type::JSObject:
      InitWrappedPtr<StubField::Type::JSObject>(dest, value);
      break;
    case StubField::Type::WeakObject:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakObject>(dest, value);
      break;
    case StubField::Type::Symbol:
      InitWrappedPtr<StubField::Type::Symbol>(dest, value);
      break;
    case StubField::Type::String:
      InitWrappedPtr<StubField::Type::String>(dest, value);
      break;
    case StubField::Type::WeakBaseScript:
      // No read barrier required to copy weak pointer.
      InitWrappedPtr<StubField::Type::WeakBaseScript>(dest, value);
      break;
    case StubField::Type::JitCode:
      InitWrappedPtr<StubField::Type::JitCode>(dest, value);
      break;
    case StubField::Type::Id:
      AsGCPtr<jsid>(dest)->init(jsid::fromRawBits(value));
      break;
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
    case StubField::Type::Value:
    case StubField::Type::WeakValue:
    case StubField::Type::Limit:
      // These are 64-bit fields; callers must use InitInt64StubField.
      MOZ_CRASH("Invalid type");
  }
}
   1172 
// Initialize a 64-bit stub data field at |dest| from its raw bits, running
// the appropriate wrapper-type constructor for Value-holding fields. |dest|
// must be 8-byte aligned.
static void InitInt64StubField(StubField::Type type, void* dest,
                               uint64_t value) {
  MOZ_ASSERT(StubField::sizeIsInt64(type));
  MOZ_ASSERT((uintptr_t(dest) % sizeof(uint64_t)) == 0, "Unaligned stub field");

  switch (type) {
    case StubField::Type::RawInt64:
    case StubField::Type::Double:
      // Plain data: store the bits directly.
      *static_cast<uint64_t*>(dest) = value;
      break;
    case StubField::Type::Value:
      InitWrappedValuePtr<StubField::Type::Value>(dest, value);
      break;
    case StubField::Type::WeakValue:
      // No read barrier required to copy weak pointer.
      InitWrappedValuePtr<StubField::Type::WeakValue>(dest, value);
      break;
    case StubField::Type::RawInt32:
    case StubField::Type::RawPointer:
    case StubField::Type::AllocSite:
    case StubField::Type::Shape:
    case StubField::Type::WeakShape:
    case StubField::Type::JSObject:
    case StubField::Type::WeakObject:
    case StubField::Type::Symbol:
    case StubField::Type::String:
    case StubField::Type::WeakBaseScript:
    case StubField::Type::JitCode:
    case StubField::Type::Id:
    case StubField::Type::Limit:
      // These are word-sized fields; callers must use InitWordStubField.
      MOZ_CRASH("Invalid type");
  }
}
   1206 
   1207 void CacheIRWriter::copyStubData(uint8_t* dest) const {
   1208  MOZ_ASSERT(!failed());
   1209 
   1210  for (const StubField& field : stubFields_) {
   1211    if (field.sizeIsWord()) {
   1212      InitWordStubField(field.type(), dest, field.asWord());
   1213      dest += sizeof(uintptr_t);
   1214    } else {
   1215      InitInt64StubField(field.type(), dest, field.asInt64());
   1216      dest += sizeof(uint64_t);
   1217    }
   1218  }
   1219 }
   1220 
// Clone this stub into |newSpace|, copying the stub data field by field so
// that GC-pointer wrapper constructors run for the copy. Allocation failure
// is treated as OOM-unsafe and crashes.
ICCacheIRStub* ICCacheIRStub::clone(JSRuntime* rt, ICStubSpace& newSpace) {
  const CacheIRStubInfo* info = stubInfo();
  MOZ_ASSERT(info->makesGCCalls());

  size_t bytesNeeded = info->stubDataOffset() + info->stubDataSize();

  AutoEnterOOMUnsafeRegion oomUnsafe;
  void* newStubMem = newSpace.alloc(bytesNeeded);
  if (!newStubMem) {
    oomUnsafe.crash("ICCacheIRStub::clone");
  }

  // Copy the stub header; the data section is copied field by field below.
  ICCacheIRStub* newStub = new (newStubMem) ICCacheIRStub(*this);

  const uint8_t* src = this->stubDataStart();
  uint8_t* dest = newStub->stubDataStart();

  // Because this can be called during sweeping when discarding JIT code, we
  // have to lock the store buffer
  gc::AutoLockStoreBuffer lock(rt);

  // Walk the field type list until the Limit terminator, initializing each
  // field of the new stub from the corresponding bits of the old one.
  uint32_t field = 0;
  while (true) {
    StubField::Type type = info->fieldType(field);
    if (type == StubField::Type::Limit) {
      break;  // Done.
    }

    if (StubField::sizeIsWord(type)) {
      const uintptr_t* srcField = reinterpret_cast<const uintptr_t*>(src);
      InitWordStubField(type, dest, *srcField);
      src += sizeof(uintptr_t);
      dest += sizeof(uintptr_t);
    } else {
      const uint64_t* srcField = reinterpret_cast<const uint64_t*>(src);
      InitInt64StubField(type, dest, *srcField);
      src += sizeof(uint64_t);
      dest += sizeof(uint64_t);
    }

    field++;
  }

  return newStub;
}
   1266 
   1267 template <typename T>
   1268 static inline bool ShouldTraceWeakEdgeInStub(JSTracer* trc) {
   1269  if constexpr (std::is_same_v<T, IonICStub>) {
   1270    // 'Weak' edges are traced strongly in IonICs.
   1271    return true;
   1272  } else {
   1273    static_assert(std::is_same_v<T, ICCacheIRStub>);
   1274    return trc->traceWeakEdges();
   1275  }
   1276 }
   1277 
// Trace every GC-thing field stored in |stub|'s data section, walking the
// stub info's field type list until the Limit terminator. Weak fields are
// traced only when ShouldTraceWeakEdgeInStub<T> says so.
template <typename T>
void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
                           const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::RawInt64:
      case Type::Double:
        // Plain data, nothing to trace.
        break;
      case Type::Shape: {
        // For CCW IC stubs, we can store same-zone but cross-compartment
        // shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
        // GC. Note: CacheIRWriter::writeShapeField asserts we never store
        // cross-zone shapes.
        GCPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::Shape>(stub, offset);
        TraceSameZoneCrossCompartmentEdge(trc, &shapeField, "cacheir-shape");
        break;
      }
      case Type::WeakShape:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          WeakHeapPtr<Shape*>& shapeField =
              stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
          if (shapeField) {
            TraceSameZoneCrossCompartmentEdge(trc, &shapeField,
                                              "cacheir-weak-shape");
          }
        }
        break;
      case Type::JSObject: {
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JSObject>(stub, offset),
                  "cacheir-object");
        break;
      }
      case Type::WeakObject:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc, &stubInfo->getStubField<T, Type::WeakObject>(stub, offset),
              "cacheir-weak-object");
        }
        break;
      case Type::Symbol:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Symbol>(stub, offset),
                  "cacheir-symbol");
        break;
      case Type::String:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::String>(stub, offset),
                  "cacheir-string");
        break;
      case Type::WeakBaseScript:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceNullableEdge(
              trc,
              &stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset),
              "cacheir-weak-script");
        }
        break;
      case Type::JitCode:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::JitCode>(stub, offset),
                  "cacheir-jitcode");
        break;
      case Type::Id:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Id>(stub, offset),
                  "cacheir-id");
        break;
      case Type::Value:
        TraceEdge(trc, &stubInfo->getStubField<T, Type::Value>(stub, offset),
                  "cacheir-value");
        break;
      case Type::WeakValue:
        if (ShouldTraceWeakEdgeInStub<T>(trc)) {
          TraceEdge(trc,
                    &stubInfo->getStubField<T, Type::WeakValue>(stub, offset),
                    "cacheir-weak-value");
        }
        break;
      case Type::AllocSite: {
        gc::AllocSite* site =
            stubInfo->getPtrStubField<T, gc::AllocSite>(stub, offset);
        site->trace(trc);
        break;
      }
      case Type::Limit:
        return;  // Done.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template void jit::TraceCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                    const CacheIRStubInfo* stubInfo);

template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
                                    const CacheIRStubInfo* stubInfo);
   1379 
// Sweep the weak fields stored in |stub|'s data section. Returns false when
// any weak edge has died (i.e. the stub should be considered dead).
template <typename T>
bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
                               const CacheIRStubInfo* stubInfo) {
  using Type = StubField::Type;

  // Trace all fields before returning because this stub can be traced again
  // later through TraceBaselineStubFrame.
  bool isDead = false;

  uint32_t field = 0;
  size_t offset = 0;
  while (true) {
    Type fieldType = stubInfo->fieldType(field);
    switch (fieldType) {
      case Type::WeakShape: {
        WeakHeapPtr<Shape*>& shapeField =
            stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
        auto r = TraceWeakEdge(trc, &shapeField, "cacheir-weak-shape");
        if (r.isDead()) {
          isDead = true;
        }
        break;
      }
      case Type::WeakObject: {
        WeakHeapPtr<JSObject*>& objectField =
            stubInfo->getStubField<T, Type::WeakObject>(stub, offset);
        auto r = TraceWeakEdge(trc, &objectField, "cacheir-weak-object");
        if (r.isDead()) {
          isDead = true;
        }
        break;
      }
      case Type::WeakBaseScript: {
        WeakHeapPtr<BaseScript*>& scriptField =
            stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset);
        auto r = TraceWeakEdge(trc, &scriptField, "cacheir-weak-script");
        if (r.isDead()) {
          isDead = true;
        }
        break;
      }
      case Type::WeakValue: {
        WeakHeapPtr<Value>& valueField =
            stubInfo->getStubField<T, Type::WeakValue>(stub, offset);
        auto r = TraceWeakEdge(trc, &valueField, "cacheir-weak-value");
        if (r.isDead()) {
          isDead = true;
        }
        break;
      }
      case Type::Limit:
        // Done.
        return !isDead;
      case Type::RawInt32:
      case Type::RawPointer:
      case Type::Shape:
      case Type::JSObject:
      case Type::Symbol:
      case Type::String:
      case Type::JitCode:
      case Type::Id:
      case Type::AllocSite:
      case Type::RawInt64:
      case Type::Value:
      case Type::Double:
        break;  // Skip non-weak fields.
    }
    field++;
    offset += StubField::sizeInBytes(fieldType);
  }
}

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
                                        const CacheIRStubInfo* stubInfo);

template bool jit::TraceWeakCacheIRStub(JSTracer* trc, IonICStub* stub,
                                        const CacheIRStubInfo* stubInfo);
   1457 
   1458 bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
   1459  MOZ_ASSERT(!failed());
   1460 
   1461  const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);
   1462 
   1463  for (const StubField& field : stubFields_) {
   1464    if (field.sizeIsWord()) {
   1465      if (field.asWord() != *stubDataWords) {
   1466        return false;
   1467      }
   1468      stubDataWords++;
   1469      continue;
   1470    }
   1471 
   1472    if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords)) {
   1473      return false;
   1474    }
   1475    stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
   1476  }
   1477 
   1478  return true;
   1479 }
   1480 
// Like stubDataEquals, but ignore the shape field at |shapeFieldOffset| and,
// if present, skip the offset field at |offsetFieldOffset| entirely.
bool CacheIRWriter::stubDataEqualsIgnoringShapeAndOffset(
    const uint8_t* stubData, uint32_t shapeFieldOffset,
    mozilla::Maybe<uint32_t> offsetFieldOffset) const {
  MOZ_ASSERT(!failed());

  uint32_t offset = 0;
  for (const StubField& field : stubFields_) {
    if (offset == shapeFieldOffset) {
      // Don't compare shapeField.
    } else if (offsetFieldOffset.isSome() && offset == *offsetFieldOffset) {
      // Skip offsetField, the "FromOffset" variant doesn't have this.
      // Note: |continue| deliberately skips the offset increment below, so
      // subsequent fields are compared as if this field were absent from
      // |stubData|.
      continue;
    } else {
      if (field.sizeIsWord()) {
        uintptr_t raw = *reinterpret_cast<const uintptr_t*>(stubData + offset);
        if (field.asWord() != raw) {
          return false;
        }
      } else {
        MOZ_ASSERT(field.sizeIsInt64());
        uint64_t raw = *reinterpret_cast<const uint64_t*>(stubData + offset);
        if (field.asInt64() != raw) {
          return false;
        }
      }
    }
    offset += StubField::sizeInBytes(field.type());
  }

  return true;
}
   1512 
   1513 HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
   1514  HashNumber hash = mozilla::HashBytes(l.code, l.length);
   1515  hash = mozilla::AddToHash(hash, uint32_t(l.kind));
   1516  hash = mozilla::AddToHash(hash, uint32_t(l.engine));
   1517  return hash;
   1518 }
   1519 
   1520 bool CacheIRStubKey::match(const CacheIRStubKey& entry,
   1521                           const CacheIRStubKey::Lookup& l) {
   1522  if (entry.stubInfo->kind() != l.kind) {
   1523    return false;
   1524  }
   1525 
   1526  if (entry.stubInfo->engine() != l.engine) {
   1527    return false;
   1528  }
   1529 
   1530  if (entry.stubInfo->codeLength() != l.length) {
   1531    return false;
   1532  }
   1533 
   1534  if (!mozilla::ArrayEqual(entry.stubInfo->code(), l.code, l.length)) {
   1535    return false;
   1536  }
   1537 
   1538  return true;
   1539 }
   1540 
// Construct a reader over the CacheIR bytecode stored in |stubInfo|, from
// code() to code() + codeLength().
CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
    : CacheIRReader(stubInfo->code(),
                    stubInfo->code() + stubInfo->codeLength()) {}
   1544 
// Allocate and initialize a CacheIRStubInfo in a single malloc'ed block laid
// out as: [CacheIRStubInfo][CacheIR code][field types + Limit terminator].
// Returns nullptr on allocation failure.
CacheIRStubInfo* CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine,
                                      bool makesGCCalls,
                                      uint32_t stubDataOffset,
                                      const CacheIRWriter& writer) {
  size_t numStubFields = writer.numStubFields();
  size_t bytesNeeded =
      sizeof(CacheIRStubInfo) + writer.codeLength() +
      (numStubFields + 1);  // +1 for the GCType::Limit terminator.
  uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
  if (!p) {
    return nullptr;
  }

  // Copy the CacheIR code.
  uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
  mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());

  static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
                "StubField::Type must fit in uint8_t");

  // Copy the stub field types.
  uint8_t* fieldTypes = codeStart + writer.codeLength();
  for (size_t i = 0; i < numStubFields; i++) {
    fieldTypes[i] = uint8_t(writer.stubFieldType(i));
  }
  fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);

  // Construct the header in place; the trailing data was written above.
  return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
                                 writer.codeLength());
}
   1575 
   1576 bool OperandLocation::operator==(const OperandLocation& other) const {
   1577  if (kind_ != other.kind_) {
   1578    return false;
   1579  }
   1580 
   1581  switch (kind()) {
   1582    case Uninitialized:
   1583      return true;
   1584    case PayloadReg:
   1585      return payloadReg() == other.payloadReg() &&
   1586             payloadType() == other.payloadType();
   1587    case ValueReg:
   1588      return valueReg() == other.valueReg();
   1589    case PayloadStack:
   1590      return payloadStack() == other.payloadStack() &&
   1591             payloadType() == other.payloadType();
   1592    case ValueStack:
   1593      return valueStack() == other.valueStack();
   1594    case BaselineFrame:
   1595      return baselineFrameSlot() == other.baselineFrameSlot();
   1596    case Constant:
   1597      return constant() == other.constant();
   1598    case DoubleReg:
   1599      return doubleReg() == other.doubleReg();
   1600  }
   1601 
   1602  MOZ_CRASH("Invalid OperandLocation kind");
   1603 }
   1604 
   1605 AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
   1606    : output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
   1607  if (output_.hasValue()) {
   1608    alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
   1609  } else if (!output_.typedReg().isFloat()) {
   1610    alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
   1611  }
   1612 }
   1613 
   1614 AutoOutputRegister::~AutoOutputRegister() {
   1615  if (output_.hasValue()) {
   1616    alloc_.releaseValueRegister(output_.valueReg());
   1617  } else if (!output_.typedReg().isFloat()) {
   1618    alloc_.releaseRegister(output_.typedReg().gpr());
   1619  }
   1620 }
   1621 
   1622 bool FailurePath::canShareFailurePath(const FailurePath& other) const {
   1623  if (stackPushed_ != other.stackPushed_) {
   1624    return false;
   1625  }
   1626 
   1627  if (spilledRegs_.length() != other.spilledRegs_.length()) {
   1628    return false;
   1629  }
   1630 
   1631  for (size_t i = 0; i < spilledRegs_.length(); i++) {
   1632    if (spilledRegs_[i] != other.spilledRegs_[i]) {
   1633      return false;
   1634    }
   1635  }
   1636 
   1637  MOZ_ASSERT(inputs_.length() == other.inputs_.length());
   1638 
   1639  for (size_t i = 0; i < inputs_.length(); i++) {
   1640    if (inputs_[i] != other.inputs_[i]) {
   1641      return false;
   1642    }
   1643  }
   1644  return true;
   1645 }
   1646 
// Record a failure path capturing the allocator's current state (input
// operand locations, spilled registers, stack depth) so that a failing guard
// can restore it before falling through to the next stub. Returns false on
// OOM. The returned FailurePath may be shared with the previous guard when
// the captured state is identical.
bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
#ifdef DEBUG
 allocator.setAddedFailurePath();
#endif
 MOZ_ASSERT(!allocator.hasAutoScratchFloatRegisterSpill());

 // Snapshot where every input operand currently lives.
 FailurePath newFailure;
 for (size_t i = 0; i < writer_.numInputOperands(); i++) {
   if (!newFailure.appendInput(allocator.operandLocation(i))) {
     return false;
   }
 }
 if (!newFailure.setSpilledRegs(allocator.spilledRegs())) {
   return false;
 }
 newFailure.setStackPushed(allocator.stackPushed());

 // Reuse the previous failure path if the current one is the same, to
 // avoid emitting duplicate code.
 if (failurePaths.length() > 0 &&
     failurePaths.back().canShareFailurePath(newFailure)) {
   *failure = &failurePaths.back();
   return true;
 }

 if (!failurePaths.append(std::move(newFailure))) {
   return false;
 }

 *failure = &failurePaths.back();
 return true;
}
   1679 
   1680 bool CacheIRCompiler::emitFailurePath(size_t index) {
   1681  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1682  FailurePath& failure = failurePaths[index];
   1683 
   1684  allocator.setStackPushed(failure.stackPushed());
   1685 
   1686  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
   1687    allocator.setOperandLocation(i, failure.input(i));
   1688  }
   1689 
   1690  if (!allocator.setSpilledRegs(failure.spilledRegs())) {
   1691    return false;
   1692  }
   1693 
   1694  masm.bind(failure.label());
   1695  allocator.restoreInputState(masm);
   1696  return true;
   1697 }
   1698 
   1699 bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId) {
   1700  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1701  JSValueType knownType = allocator.knownType(inputId);
   1702 
   1703  // Doubles and ints are numbers!
   1704  if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32) {
   1705    return true;
   1706  }
   1707 
   1708  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1709  FailurePath* failure;
   1710  if (!addFailurePath(&failure)) {
   1711    return false;
   1712  }
   1713 
   1714  masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
   1715  return true;
   1716 }
   1717 
   1718 bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId) {
   1719  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1720  if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
   1721    return true;
   1722  }
   1723 
   1724  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1725  FailurePath* failure;
   1726  if (!addFailurePath(&failure)) {
   1727    return false;
   1728  }
   1729  masm.branchTestObject(Assembler::NotEqual, input, failure->label());
   1730  return true;
   1731 }
   1732 
   1733 bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
   1734  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1735  JSValueType knownType = allocator.knownType(inputId);
   1736  if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
   1737    return true;
   1738  }
   1739 
   1740  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1741  FailurePath* failure;
   1742  if (!addFailurePath(&failure)) {
   1743    return false;
   1744  }
   1745 
   1746  Label success;
   1747  masm.branchTestNull(Assembler::Equal, input, &success);
   1748  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
   1749 
   1750  masm.bind(&success);
   1751  return true;
   1752 }
   1753 
   1754 bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId) {
   1755  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1756  JSValueType knownType = allocator.knownType(inputId);
   1757  if (knownType == JSVAL_TYPE_NULL) {
   1758    return true;
   1759  }
   1760 
   1761  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1762  FailurePath* failure;
   1763  if (!addFailurePath(&failure)) {
   1764    return false;
   1765  }
   1766 
   1767  masm.branchTestNull(Assembler::NotEqual, input, failure->label());
   1768  return true;
   1769 }
   1770 
   1771 bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId) {
   1772  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1773  JSValueType knownType = allocator.knownType(inputId);
   1774  if (knownType == JSVAL_TYPE_UNDEFINED) {
   1775    return true;
   1776  }
   1777 
   1778  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1779  FailurePath* failure;
   1780  if (!addFailurePath(&failure)) {
   1781    return false;
   1782  }
   1783 
   1784  masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
   1785  return true;
   1786 }
   1787 
   1788 bool CacheIRCompiler::emitGuardIsNotUninitializedLexical(ValOperandId valId) {
   1789  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1790 
   1791  ValueOperand val = allocator.useValueRegister(masm, valId);
   1792 
   1793  FailurePath* failure;
   1794  if (!addFailurePath(&failure)) {
   1795    return false;
   1796  }
   1797 
   1798  masm.branchTestMagicValue(Assembler::Equal, val, JS_UNINITIALIZED_LEXICAL,
   1799                            failure->label());
   1800  return true;
   1801 }
   1802 
   1803 bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId,
   1804                                              Int32OperandId resultId) {
   1805  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1806  Register output = allocator.defineRegister(masm, resultId);
   1807 
   1808  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
   1809    Register input =
   1810        allocator.useRegister(masm, BooleanOperandId(inputId.id()));
   1811    masm.move32(input, output);
   1812    return true;
   1813  }
   1814  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1815 
   1816  FailurePath* failure;
   1817  if (!addFailurePath(&failure)) {
   1818    return false;
   1819  }
   1820 
   1821  masm.fallibleUnboxBoolean(input, output, failure->label());
   1822  return true;
   1823 }
   1824 
   1825 bool CacheIRCompiler::emitGuardToString(ValOperandId inputId) {
   1826  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1827  if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
   1828    return true;
   1829  }
   1830 
   1831  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1832  FailurePath* failure;
   1833  if (!addFailurePath(&failure)) {
   1834    return false;
   1835  }
   1836  masm.branchTestString(Assembler::NotEqual, input, failure->label());
   1837  return true;
   1838 }
   1839 
   1840 bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId) {
   1841  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1842  if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
   1843    return true;
   1844  }
   1845 
   1846  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1847  FailurePath* failure;
   1848  if (!addFailurePath(&failure)) {
   1849    return false;
   1850  }
   1851  masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
   1852  return true;
   1853 }
   1854 
   1855 bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId) {
   1856  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1857  if (allocator.knownType(inputId) == JSVAL_TYPE_BIGINT) {
   1858    return true;
   1859  }
   1860 
   1861  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1862  FailurePath* failure;
   1863  if (!addFailurePath(&failure)) {
   1864    return false;
   1865  }
   1866  masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
   1867  return true;
   1868 }
   1869 
   1870 bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId) {
   1871  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1872 
   1873  if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
   1874    return true;
   1875  }
   1876 
   1877  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1878  FailurePath* failure;
   1879  if (!addFailurePath(&failure)) {
   1880    return false;
   1881  }
   1882  masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
   1883  return true;
   1884 }
   1885 
   1886 bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId) {
   1887  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1888 
   1889  if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
   1890    return true;
   1891  }
   1892 
   1893  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1894 
   1895  FailurePath* failure;
   1896  if (!addFailurePath(&failure)) {
   1897    return false;
   1898  }
   1899 
   1900  masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
   1901  return true;
   1902 }
   1903 
   1904 bool CacheIRCompiler::emitGuardToNonGCThing(ValOperandId inputId) {
   1905  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   1906 
   1907  ValueOperand input = allocator.useValueRegister(masm, inputId);
   1908 
   1909  FailurePath* failure;
   1910  if (!addFailurePath(&failure)) {
   1911    return false;
   1912  }
   1913 
   1914  masm.branchTestGCThing(Assembler::Equal, input, failure->label());
   1915  return true;
   1916 }
   1917 
// Infallible |emitDouble| emitters can use this implementation to avoid
// generating extra clean-up instructions to restore the scratch float register.
// To select this function simply omit the |Label* fail| parameter for the
// emitter lambda function.
template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 1,
                       void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
               ValueOperand input, FailurePath* failure,
               EmitDouble emitDouble) {
 // |input| is known to hold a double at this point; unbox it into a scratch
 // float register and hand it to the emitter.
 AutoScratchFloatRegister floatReg(compiler);

 masm.unboxDouble(input, floatReg);
 emitDouble(floatReg.get());
}
   1933 
// Fallible |emitDouble| emitters (arity 2, taking a |Label* fail| argument)
// select this overload. The scratch float register is constructed with the
// failure path so its clean-up code runs before jumping to |failure|.
template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 2,
                       void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
               ValueOperand input, FailurePath* failure,
               EmitDouble emitDouble) {
 AutoScratchFloatRegister floatReg(compiler, failure);

 masm.unboxDouble(input, floatReg);
 // |floatReg.failure()| is the label the emitter jumps to on failure.
 emitDouble(floatReg.get(), floatReg.failure());
}
   1945 
// Emit a type dispatch on |input|: int32 values are unboxed into |output| and
// handled by |emitInt32|; double values are handled by |emitDouble| (via
// EmitGuardDouble); any other type jumps to |failure|.
template <typename EmitInt32, typename EmitDouble>
static void EmitGuardInt32OrDouble(CacheIRCompiler* compiler,
                                   MacroAssembler& masm, ValueOperand input,
                                   Register output, FailurePath* failure,
                                   EmitInt32 emitInt32, EmitDouble emitDouble) {
 Label done;

 {
   ScratchTagScope tag(masm, input);
   masm.splitTagForTest(input, tag);

   Label notInt32;
   masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
   {
     // Release the tag scratch while emitting the int32 path so the
     // register is available to the emitter.
     ScratchTagScopeRelease _(&tag);

     masm.unboxInt32(input, output);
     emitInt32();

     masm.jump(&done);
   }
   masm.bind(&notInt32);

   // Not an int32: must be a double, else fail.
   masm.branchTestDouble(Assembler::NotEqual, tag, failure->label());
   {
     ScratchTagScopeRelease _(&tag);

     EmitGuardDouble(compiler, masm, input, failure, emitDouble);
   }
 }

 masm.bind(&done);
}
   1979 
// Guard that |inputId| is an int32, or a double exactly representable as an
// int32, and materialize that int32 in |resultId|'s register.
bool CacheIRCompiler::emitGuardToInt32Index(ValOperandId inputId,
                                            Int32OperandId resultId) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
 Register output = allocator.defineRegister(masm, resultId);

 // Known int32: just copy the payload register; no guard needed.
 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
   Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
   masm.move32(input, output);
   return true;
 }

 ValueOperand input = allocator.useValueRegister(masm, inputId);

 FailurePath* failure;
 if (!addFailurePath(&failure)) {
   return false;
 }

 EmitGuardInt32OrDouble(
     this, masm, input, output, failure,
     []() {
       // No-op if the value is already an int32.
     },
     [&](FloatRegister floatReg, Label* fail) {
       // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
       masm.convertDoubleToInt32(floatReg, output, fail, false);
     });

 return true;
}
   2010 
   2011 bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId,
   2012                                        IntPtrOperandId resultId) {
   2013  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   2014 
   2015  Register input = allocator.useRegister(masm, inputId);
   2016  Register output = allocator.defineRegister(masm, resultId);
   2017 
   2018  masm.move32SignExtendToPtr(input, output);
   2019  return true;
   2020 }
   2021 
// Convert the number in |inputId| to a pointer-sized index in |resultId|.
// With |supportOOB| set, an unrepresentable double is replaced with -1 (an
// arbitrary out-of-bounds index) instead of failing; otherwise conversion
// failure jumps to the stub's failure path.
bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId,
                                                   bool supportOOB,
                                                   IntPtrOperandId resultId) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 Register output = allocator.defineRegister(masm, resultId);

 // Only the non-OOB variant needs a failure path; in the OOB variant the
 // conversion can't fail the stub. |failure| stays null in that case, which
 // AutoScratchFloatRegister accepts.
 FailurePath* failure = nullptr;
 if (!supportOOB) {
   if (!addFailurePath(&failure)) {
     return false;
   }
 }

 AutoScratchFloatRegister floatReg(this, failure);
 allocator.ensureDoubleRegister(masm, inputId, floatReg);

 // ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
 if (supportOOB) {
   Label done, fail;
   masm.convertDoubleToPtr(floatReg, output, &fail, false);
   masm.jump(&done);

   // Substitute the invalid index with an arbitrary out-of-bounds index.
   masm.bind(&fail);
   masm.movePtr(ImmWord(-1), output);

   masm.bind(&done);
 } else {
   masm.convertDoubleToPtr(floatReg, output, floatReg.failure(), false);
 }

 return true;
}
   2056 
// Truncate the double in |floatReg| into |result| modulo 2^32. Tries the
// inline branchTruncateDoubleMaybeModUint32 fast path first and only emits
// an out-of-line ABI call to JS::ToInt32 if the fast path can branch out.
static void TruncateDoubleModUint32(MacroAssembler& masm,
                                    FloatRegister floatReg, Register result,
                                    const LiveRegisterSet& liveVolatileRegs) {
 Label truncateABICall;
 masm.branchTruncateDoubleMaybeModUint32(floatReg, result, &truncateABICall);

 // Skip the slow path entirely if the fast path never branches to it.
 if (truncateABICall.used()) {
   Label done;
   masm.jump(&done);

   masm.bind(&truncateABICall);
   // Save live volatile registers around the call, excluding |floatReg|:
   // it only holds the call's input.
   LiveRegisterSet save = liveVolatileRegs;
   save.takeUnchecked(floatReg);
   // Bug 1451976
   save.takeUnchecked(floatReg.asSingle());
   masm.PushRegsInMask(save);

   using Fn = int32_t (*)(double);
   masm.setupUnalignedABICall(result);
   masm.passABIArg(floatReg, ABIType::Float64);
   masm.callWithABI<Fn, JS::ToInt32>(ABIType::General,
                                     CheckUnsafeCallWithABI::DontCheckOther);
   masm.storeCallInt32Result(result);

   // Restore everything except |result|, which now holds the answer.
   LiveRegisterSet ignore;
   ignore.add(result);
   masm.PopRegsInMaskIgnore(save, ignore);

   masm.bind(&done);
 }
}
   2088 
// Guard that |inputId| is a number and produce its int32 truncation
// (modulo 2^32) in |resultId|. Non-numbers jump to the failure path.
bool CacheIRCompiler::emitGuardToInt32ModUint32(ValOperandId inputId,
                                                Int32OperandId resultId) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
 Register output = allocator.defineRegister(masm, resultId);

 // Known int32: copy the constant or payload register directly.
 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
   ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
   if (input.constant()) {
     masm.move32(Imm32(input.value().toInt32()), output);
   } else {
     MOZ_ASSERT(input.reg().type() == MIRType::Int32);
     masm.move32(input.reg().typedReg().gpr(), output);
   }
   return true;
 }

 ValueOperand input = allocator.useValueRegister(masm, inputId);

 FailurePath* failure;
 if (!addFailurePath(&failure)) {
   return false;
 }

 EmitGuardInt32OrDouble(
     this, masm, input, output, failure,
     []() {
       // No-op if the value is already an int32.
     },
     [&](FloatRegister floatReg) {
       // Doubles are truncated modulo 2^32; this emitter is infallible.
       TruncateDoubleModUint32(masm, floatReg, output, liveVolatileRegs());
     });

 return true;
}
   2123 
// Guard that |inputId| is a number and produce its value clamped to the
// uint8 range [0, 255] in |resultId| (Uint8ClampedArray semantics).
bool CacheIRCompiler::emitGuardToUint8Clamped(ValOperandId inputId,
                                              Int32OperandId resultId) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
 Register output = allocator.defineRegister(masm, resultId);

 // Known int32: clamp the constant at compile time, or clamp the copied
 // payload register.
 if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
   ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
   if (input.constant()) {
     masm.move32(Imm32(ClampDoubleToUint8(input.value().toInt32())), output);
   } else {
     MOZ_ASSERT(input.reg().type() == MIRType::Int32);
     masm.move32(input.reg().typedReg().gpr(), output);
     masm.clampIntToUint8(output);
   }
   return true;
 }

 ValueOperand input = allocator.useValueRegister(masm, inputId);

 FailurePath* failure;
 if (!addFailurePath(&failure)) {
   return false;
 }

 EmitGuardInt32OrDouble(
     this, masm, input, output, failure,
     [&]() {
       // |output| holds the unboxed int32 value.
       masm.clampIntToUint8(output);
     },
     [&](FloatRegister floatReg) {
       // Doubles are clamped (with rounding) to [0, 255]; infallible.
       masm.clampDoubleToUint8(floatReg, output);
     });

 return true;
}
   2160 
// Guard that the operand has the given primitive type. Only the types with
// a branchTest* case below are supported; Double, Magic, PrivateGCThing and
// Object are handled by other guards and crash here.
bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
                                             ValueType type) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 // Statically known to have the requested type: no code needed.
 if (allocator.knownType(inputId) == JSValueType(type)) {
   return true;
 }

 ValueOperand input = allocator.useValueRegister(masm, inputId);

 FailurePath* failure;
 if (!addFailurePath(&failure)) {
   return false;
 }

 switch (type) {
   case ValueType::String:
     masm.branchTestString(Assembler::NotEqual, input, failure->label());
     break;
   case ValueType::Symbol:
     masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
     break;
   case ValueType::BigInt:
     masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
     break;
   case ValueType::Int32:
     masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
     break;
   case ValueType::Boolean:
     masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
     break;
   case ValueType::Undefined:
     masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
     break;
   case ValueType::Null:
     masm.branchTestNull(Assembler::NotEqual, input, failure->label());
     break;
   case ValueType::Double:
   case ValueType::Magic:
   case ValueType::PrivateGCThing:
   case ValueType::Object:
     MOZ_CRASH("unexpected type");
 }

 return true;
}
   2207 
// Return the JSClass to guard on for |kind|. Most kinds delegate to the
// one-argument ClassFor(GuardClassKind) overload (this is overload dispatch,
// not self-recursion). WindowProxy's class is runtime-dependent, and
// JSFunction has no single class, so the caller must handle it separately.
static const JSClass* ClassFor(JSContext* cx, GuardClassKind kind) {
 switch (kind) {
   case GuardClassKind::Array:
   case GuardClassKind::PlainObject:
   case GuardClassKind::FixedLengthArrayBuffer:
   case GuardClassKind::ImmutableArrayBuffer:
   case GuardClassKind::ResizableArrayBuffer:
   case GuardClassKind::FixedLengthSharedArrayBuffer:
   case GuardClassKind::GrowableSharedArrayBuffer:
   case GuardClassKind::FixedLengthDataView:
   case GuardClassKind::ImmutableDataView:
   case GuardClassKind::ResizableDataView:
   case GuardClassKind::MappedArguments:
   case GuardClassKind::UnmappedArguments:
   case GuardClassKind::Set:
   case GuardClassKind::Map:
   case GuardClassKind::BoundFunction:
   case GuardClassKind::Date:
   case GuardClassKind::WeakMap:
   case GuardClassKind::WeakSet:
     // Calls the one-argument overload, which doesn't need a JSContext.
     return ClassFor(kind);
   case GuardClassKind::WindowProxy:
     return cx->runtime()->maybeWindowProxyClass();
   case GuardClassKind::JSFunction:
     MOZ_CRASH("must be handled by caller");
 }
 MOZ_CRASH("unexpected kind");
}
   2236 
// Guard that |objId| has the class corresponding to |kind|, jumping to the
// failure path otherwise. JSFunction uses dedicated is-function tests since
// it has no single JSClass (see ClassFor).
bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
 Register obj = allocator.useRegister(masm, objId);
 AutoScratchRegister scratch(allocator, masm);

 FailurePath* failure;
 if (!addFailurePath(&failure)) {
   return false;
 }

 if (kind == GuardClassKind::JSFunction) {
   // The Spectre-hardened variant takes |obj| as an extra register for the
   // masm helper's mitigation; the cheaper variant is used when the guard
   // doesn't need it.
   if (objectGuardNeedsSpectreMitigations(objId)) {
     masm.branchTestObjIsFunction(Assembler::NotEqual, obj, scratch, obj,
                                  failure->label());
   } else {
     masm.branchTestObjIsFunctionNoSpectreMitigations(
         Assembler::NotEqual, obj, scratch, failure->label());
   }
   return true;
 }

 const JSClass* clasp = ClassFor(cx_, kind);
 MOZ_ASSERT(clasp);

 if (objectGuardNeedsSpectreMitigations(objId)) {
   masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                           failure->label());
 } else {
   masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                               scratch, failure->label());
 }

 return true;
}
   2271 
   2272 bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
   2273  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   2274  Register obj = allocator.useRegister(masm, objId);
   2275  AutoScratchRegister scratch(allocator, masm);
   2276 
   2277  FailurePath* failure;
   2278  if (!addFailurePath(&failure)) {
   2279    return false;
   2280  }
   2281 
   2282  masm.loadObjProto(obj, scratch);
   2283  masm.branchTestPtr(Assembler::NonZero, scratch, scratch, failure->label());
   2284  return true;
   2285 }
   2286 
   2287 bool CacheIRCompiler::emitGuardIsExtensible(ObjOperandId objId) {
   2288  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   2289  Register obj = allocator.useRegister(masm, objId);
   2290  AutoScratchRegister scratch(allocator, masm);
   2291 
   2292  FailurePath* failure;
   2293  if (!addFailurePath(&failure)) {
   2294    return false;
   2295  }
   2296 
   2297  masm.branchIfObjectNotExtensible(obj, scratch, failure->label());
   2298  return true;
   2299 }
   2300 
// Guard that the dynamic slot at |slotOffset| (a stub-field byte offset into
// the object's slots array) holds exactly the object in |expectedId|.
bool CacheIRCompiler::emitGuardDynamicSlotIsSpecificObject(
    ObjOperandId objId, ObjOperandId expectedId, uint32_t slotOffset) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
 Register obj = allocator.useRegister(masm, objId);
 Register expectedObject = allocator.useRegister(masm, expectedId);

 // Allocate registers before the failure path to make sure they're registered
 // by addFailurePath.
 AutoScratchRegister scratch1(allocator, masm);
 AutoScratchRegister scratch2(allocator, masm);

 FailurePath* failure;
 if (!addFailurePath(&failure)) {
   return false;
 }

 // Guard on the expected object.
 StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
 emitLoadStubField(slot, scratch2);
 BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
 // Fail if the slot doesn't hold an object, then fail if it isn't the
 // expected one.
 masm.fallibleUnboxObject(expectedSlot, scratch1, failure->label());
 masm.branchPtr(Assembler::NotEqual, expectedObject, scratch1,
                failure->label());

 return true;
}
   2328 
// Guard that the dynamic slot at |slotOffset| (a stub-field byte offset into
// the object's slots array) does not hold an object value.
bool CacheIRCompiler::emitGuardDynamicSlotIsNotObject(ObjOperandId objId,
                                                      uint32_t slotOffset) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
 Register obj = allocator.useRegister(masm, objId);

 AutoScratchRegister scratch1(allocator, masm);
 AutoScratchRegister scratch2(allocator, masm);

 FailurePath* failure;
 if (!addFailurePath(&failure)) {
   return false;
 }

 // Guard that the slot isn't an object.
 StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
 emitLoadStubField(slot, scratch2);
 BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
 masm.branchTestObject(Assembler::Equal, expectedSlot, failure->label());

 return true;
}
   2351 
// Guard that the fixed slot at the stub-field byte offset |offsetOffset|
// (relative to the object base) holds exactly the Value stored in the stub
// field at |valOffset|.
bool CacheIRCompiler::emitGuardFixedSlotValue(ObjOperandId objId,
                                              uint32_t offsetOffset,
                                              uint32_t valOffset) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 Register obj = allocator.useRegister(masm, objId);

 AutoScratchRegister scratch(allocator, masm);
 AutoScratchValueRegister scratchVal(allocator, masm);

 FailurePath* failure;
 if (!addFailurePath(&failure)) {
   return false;
 }

 // Load the slot's byte offset from the stub data.
 StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
 emitLoadStubField(offset, scratch);

 // Load the expected Value from the stub data.
 StubFieldOffset val(valOffset, StubField::Type::Value);
 emitLoadValueStubField(val, scratchVal);

 // TimesOne: |scratch| is already a byte offset from the object base.
 BaseIndex slotVal(obj, scratch, TimesOne);
 masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
                      failure->label());
 return true;
}
   2378 
// Guard that the dynamic slot at the stub-field byte offset |offsetOffset|
// (relative to the object's slots array) holds exactly the Value stored in
// the stub field at |valOffset|.
bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
                                                uint32_t offsetOffset,
                                                uint32_t valOffset) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 Register obj = allocator.useRegister(masm, objId);

 AutoScratchRegister scratch1(allocator, masm);
 AutoScratchRegister scratch2(allocator, masm);
 AutoScratchValueRegister scratchVal(allocator, masm);

 FailurePath* failure;
 if (!addFailurePath(&failure)) {
   return false;
 }

 // Load the dynamic slots pointer, then the slot's byte offset and the
 // expected Value from the stub data.
 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);

 StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
 emitLoadStubField(offset, scratch2);

 StubFieldOffset val(valOffset, StubField::Type::Value);
 emitLoadValueStubField(val, scratchVal);

 // TimesOne: |scratch2| is already a byte offset from the slots base.
 BaseIndex slotVal(scratch1, scratch2, TimesOne);
 masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
                      failure->label());
 return true;
}
   2408 
// Debug-flavored check (via assumeUnreachable) that the weakly-held Value in
// the stub data still matches the object's fixed slot; a mismatch would mean
// the stub survived its weak reference being swept.
bool CacheIRCompiler::emitCheckWeakValueResultForFixedSlot(
    ObjOperandId objId, uint32_t offsetOffset, uint32_t valOffset) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 // Ion IC stubs use a strong reference for UncheckedLoadWeakValueResult and
 // UncheckedLoadWeakObjectResult (the value is baked into IC JIT code as a
 // constant), so we don't need this check there.
 if (isIon()) {
   return true;
 }

 Register obj = allocator.useRegister(masm, objId);
 AutoScratchRegister scratch(allocator, masm);
 AutoScratchValueRegister scratchVal(allocator, masm);

 // Load the slot's byte offset and the weakly-held Value from the stub data.
 StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
 emitLoadStubField(offset, scratch);

 StubFieldOffset val(valOffset, StubField::Type::WeakValue);
 emitLoadValueStubField(val, scratchVal);

 // If the slot matches, fall through; otherwise hit the unreachable trap.
 Label done;
 BaseIndex slotVal(obj, scratch, TimesOne);
 masm.branchTestValue(Assembler::Equal, slotVal, scratchVal, &done);
 masm.assumeUnreachable("CheckWeakValueResultForFixedSlot failed");
 masm.bind(&done);
 return true;
}
   2437 
// Debug-flavored check (via assumeUnreachable) that the weakly-held Value in
// the stub data still matches the object's dynamic slot; a mismatch would
// mean the stub survived its weak reference being swept.
bool CacheIRCompiler::emitCheckWeakValueResultForDynamicSlot(
    ObjOperandId objId, uint32_t offsetOffset, uint32_t valOffset) {
 JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

 // Ion IC stubs use a strong reference for UncheckedLoadWeakValueResult and
 // UncheckedLoadWeakObjectResult (the value is baked into IC JIT code as a
 // constant), so we don't need this check there.
 if (isIon()) {
   return true;
 }

 Register obj = allocator.useRegister(masm, objId);

 AutoScratchRegister scratch1(allocator, masm);
 AutoScratchRegister scratch2(allocator, masm);
 AutoScratchValueRegister scratchVal(allocator, masm);

 // Load the dynamic slots pointer, the slot's byte offset, and the
 // weakly-held Value from the stub data.
 masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);

 StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
 emitLoadStubField(offset, scratch2);

 StubFieldOffset val(valOffset, StubField::Type::WeakValue);
 emitLoadValueStubField(val, scratchVal);

 // If the slot matches, fall through; otherwise hit the unreachable trap.
 Label done;
 BaseIndex slotVal(scratch1, scratch2, TimesOne);
 masm.branchTestValue(Assembler::Equal, slotVal, scratchVal, &done);
 masm.assumeUnreachable("CheckWeakValueResultForDynamicSlot failed");
 masm.bind(&done);
 return true;
}
   2470 
bool CacheIRCompiler::emitLoadScriptedProxyHandler(ObjOperandId resultId,
                                                   ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Load a scripted proxy's handler object (from the HANDLER_EXTRA reserved
  // slot) into |resultId|. Fails if the slot does not hold an object (e.g.
  // presumably after revocation — confirm against ScriptedProxyHandler).

  Register obj = allocator.useRegister(masm, objId);
  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // output = obj->reservedSlots, then unbox the handler slot as an object.
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);
  Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
                                  ScriptedProxyHandler::HANDLER_EXTRA));
  masm.fallibleUnboxObject(handlerAddr, output, failure->label());

  return true;
}
   2490 
bool CacheIRCompiler::emitIdToStringOrSymbol(ValOperandId resultId,
                                             ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Convert a property-key Value to a string or symbol Value:
  //  - strings and symbols pass through unchanged,
  //  - int32 is converted to a string (static-strings fast path, else an ABI
  //    call to Int32ToStringPure),
  //  - any other type takes the failure path.

  ValueOperand id = allocator.useValueRegister(masm, idId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.moveValue(id, output);

  Label done, intDone, callVM;
  {
    ScratchTagScope tag(masm, output);
    masm.splitTagForTest(output, tag);
    masm.branchTestString(Assembler::Equal, tag, &done);
    masm.branchTestSymbol(Assembler::Equal, tag, &done);
    masm.branchTestInt32(Assembler::NotEqual, tag, failure->label());
  }

  Register intReg = output.scratchReg();
  masm.unboxInt32(output, intReg);

  // Fast path for small integers.
  masm.lookupStaticIntString(intReg, intReg, scratch, cx_->staticStrings(),
                             &callVM);
  masm.jump(&intDone);

  masm.bind(&callVM);
  // Not a static string: call Int32ToStringPure. Save/restore all volatile
  // registers around the ABI call since other operands may still be live.
  LiveRegisterSet volatileRegs = liveVolatileRegs();
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSLinearString* (*)(JSContext * cx, int32_t i);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(intReg);
  masm.callWithABI<Fn, js::Int32ToStringPure>();

  masm.storeCallPointerResult(intReg);

  // Don't clobber the call result while restoring registers.
  LiveRegisterSet ignore;
  ignore.add(intReg);
  masm.PopRegsInMaskIgnore(volatileRegs, ignore);

  // A null result signals failure (e.g. OOM) — bail out.
  masm.branchPtr(Assembler::Equal, intReg, ImmPtr(nullptr), failure->label());

  masm.bind(&intDone);
  masm.tagValue(JSVAL_TYPE_STRING, intReg, output);
  masm.bind(&done);

  return true;
}
   2548 
bool CacheIRCompiler::emitLoadFixedSlot(ValOperandId resultId,
                                        ObjOperandId objId,
                                        uint32_t offsetOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Load the Value from |obj|'s fixed slot, whose byte offset is stored as a
  // RawInt32 stub field, into the result register.

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  StubFieldOffset slotIndex(offsetOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotIndex, scratch);

  // Fixed slots are inline in the object, so index off |obj| directly.
  masm.loadValue(BaseIndex(obj, scratch, TimesOne), output);
  return true;
}
   2564 
bool CacheIRCompiler::emitLoadFixedSlotFromOffsetResult(
    ObjOperandId objId, Int32OperandId offsetId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Like emitLoadFixedSlot, but the byte offset is a runtime int32 operand
  // rather than a stub field, and the result goes to the IC output register.
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register offset = allocator.useRegister(masm, offsetId);

  // Load the value at the offset reg.
  masm.loadValue(BaseIndex(obj, offset, TimesOne), output.valueReg());
  return true;
}
   2576 
bool CacheIRCompiler::emitStoreFixedSlotFromOffset(ObjOperandId objId,
                                                   Int32OperandId offsetId,
                                                   ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Store |rhsId| into |obj|'s fixed slot at the runtime byte offset
  // |offsetId|, with the GC pre- and post-barriers around the store.

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);
  Register offset = allocator.useRegister(masm, offsetId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  BaseIndex slot(obj, offset, TimesOne);
  // Pre-barrier on the old slot Value before overwriting it.
  EmitPreBarrier(masm, slot, MIRType::Value);

  masm.storeValue(val, slot);

  // Post-barrier in case we stored a nursery thing into a tenured object.
  emitPostBarrierSlot(obj, val, scratch);

  return true;
}
   2596 
bool CacheIRCompiler::emitStoreDynamicSlotFromOffset(ObjOperandId objId,
                                                     Int32OperandId offsetId,
                                                     ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Store |rhsId| into |obj|'s dynamic slot at the runtime byte offset
  // |offsetId|, with GC pre- and post-barriers around the store.

  Register obj = allocator.useRegister(masm, objId);
  Register offset = allocator.useRegister(masm, offsetId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegister slots(allocator, masm);

  // slots = obj->slots (pointer to the out-of-line slots array).
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), slots);
  BaseIndex slot(slots, offset, TimesOne);
  // Pre-barrier on the old slot Value before overwriting it.
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeValue(val, slot);

  // |slots| is dead after the store, so reuse it as the barrier scratch.
  emitPostBarrierSlot(obj, val, /*scratch=*/slots);
  return true;
}
   2616 
bool CacheIRCompiler::emitLoadDynamicSlot(ValOperandId resultId,
                                          ObjOperandId objId,
                                          uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Load the Value from |obj|'s dynamic slot, whose index is stored as a
  // RawInt32 stub field, into the result register.

  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch1(allocator, masm);
  // The output's scratch register is free until the final loadValue.
  Register scratch2 = output.scratchReg();

  StubFieldOffset slotIndex(slotOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotIndex, scratch2);

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
  masm.loadValue(BaseObjectSlotIndex(scratch1, scratch2), output);
  return true;
}
   2634 
bool CacheIRCompiler::emitLoadDynamicSlotFromOffsetResult(
    ObjOperandId objId, Int32OperandId offsetId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Load the Value at runtime byte offset |offsetId| within |obj|'s dynamic
  // slots array into the IC output register.
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register offset = allocator.useRegister(masm, offsetId);
  AutoScratchRegister scratch(allocator, masm);

  // obj->slots
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
  masm.loadValue(BaseIndex(scratch, offset, TimesOne), output.valueReg());
  return true;
}
   2648 
bool CacheIRCompiler::emitGuardIsNativeObject(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path unless |obj| is a native object.

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNonNativeObj(obj, scratch, failure->label());
  return true;
}
   2663 
bool CacheIRCompiler::emitGuardIsProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path unless |obj| is a proxy.

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // |false| = branch when the object is NOT a proxy.
  masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
  return true;
}
   2678 
bool CacheIRCompiler::emitGuardIsNotProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path if |obj| IS a proxy.

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // |true| = branch when the object IS a proxy.
  masm.branchTestObjectIsProxy(true, obj, scratch, failure->label());
  return true;
}
   2693 
bool CacheIRCompiler::emitGuardToArrayBuffer(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path unless |obj| is an ArrayBuffer.

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfIsNotArrayBuffer(obj, scratch, failure->label());
  return true;
}
   2708 
bool CacheIRCompiler::emitGuardToSharedArrayBuffer(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path unless |obj| is a SharedArrayBuffer.

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfIsNotSharedArrayBuffer(obj, scratch, failure->label());
  return true;
}
   2723 
bool CacheIRCompiler::emitGuardIsNotArrayBufferMaybeShared(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path if |obj| is an ArrayBuffer OR a
  // SharedArrayBuffer.

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfIsArrayBufferMaybeShared(obj, scratch, failure->label());
  return true;
}
   2738 
bool CacheIRCompiler::emitGuardIsTypedArray(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path unless |obj|'s class is a typed-array class.

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotTypedArray(scratch, failure->label());
  return true;
}
   2754 
bool CacheIRCompiler::emitGuardIsNonResizableTypedArray(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path unless |obj|'s class is a non-resizable
  // typed-array class.

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotNonResizableTypedArray(scratch, failure->label());
  return true;
}
   2770 
bool CacheIRCompiler::emitGuardIsResizableTypedArray(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path unless |obj|'s class is a resizable
  // typed-array class.

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjClassUnsafe(obj, scratch);
  masm.branchIfClassIsNotResizableTypedArray(scratch, failure->label());
  return true;
}
   2786 
bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path if |obj| is a proxy whose handler belongs to
  // the DOM proxy handler family.
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
                                    GetDOMProxyHandlerFamily(),
                                    failure->label());
  return true;
}
   2802 
bool CacheIRCompiler::emitGuardNoDenseElements(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path unless |obj| has zero initialized dense
  // elements.
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Make sure there are no dense elements.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
  return true;
}
   2821 
bool CacheIRCompiler::emitGuardSpecificInt32(Int32OperandId numId,
                                             int32_t expected) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Guard: take the failure path unless the int32 operand equals |expected|.
  Register num = allocator.useRegister(masm, numId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branch32(Assembler::NotEqual, num, Imm32(expected), failure->label());
  return true;
}
   2835 
bool CacheIRCompiler::emitGuardStringToInt32(StringOperandId strId,
                                             Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Convert a string operand to int32 (delegated to the macro-assembler
  // helper); takes the failure path when the conversion is not possible.
  Register str = allocator.useRegister(masm, strId);
  Register output = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // liveVolatileRegs() is passed so the helper can spill around any call.
  masm.guardStringToInt32(str, output, scratch, liveVolatileRegs(),
                          failure->label());
  return true;
}
   2852 
bool CacheIRCompiler::emitGuardStringToNumber(StringOperandId strId,
                                              NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Convert a string operand to a number Value. Strings that cache an index
  // value are converted inline; everything else goes through an ABI call to
  // StringToNumberPure, whose double result is written to a stack temporary.
  Register str = allocator.useRegister(masm, strId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label vmCall, done;
  // Use indexed value as fast path if possible.
  masm.loadStringIndexValue(str, scratch, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output);
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    // Reserve stack for holding the result value of the call.
    masm.reserveStack(sizeof(double));
    masm.moveStackPtrTo(output.payloadOrValueReg());

    // We cannot use callVM, as callVM expects to be able to clobber all
    // operands, however, since this op is not the last in the generated IC, we
    // want to be able to reference other live values.
    LiveRegisterSet volatileRegs = liveVolatileRegs();
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
    masm.setupUnalignedABICall(scratch);
    masm.loadJSContext(scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(str);
    masm.passABIArg(output.payloadOrValueReg());
    masm.callWithABI<Fn, js::StringToNumberPure>();
    masm.storeCallPointerResult(scratch);

    // Keep the call's boolean result live across register restoration.
    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    Label ok;
    masm.branchIfTrueBool(scratch, &ok);
    {
      // OOM path, recovered by StringToNumberPure.
      //
      // Use addToStackPtr instead of freeStack as freeStack tracks stack height
      // flow-insensitively, and using it twice would confuse the stack height
      // tracking.
      masm.addToStackPtr(Imm32(sizeof(double)));
      masm.jump(failure->label());
    }
    masm.bind(&ok);

    // Read back the double result and box it into the output Value register.
    {
      ScratchDoubleScope fpscratch(masm);
      masm.loadDouble(Address(output.payloadOrValueReg(), 0), fpscratch);
      masm.boxDouble(fpscratch, output, fpscratch);
    }
    masm.freeStack(sizeof(double));
  }
  masm.bind(&done);
  return true;
}
   2919 
bool CacheIRCompiler::emitNumberParseIntResult(StringOperandId strId,
                                               Int32OperandId radixId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Number.parseInt(str, radix): use a string's cached index value as a fast
  // path (valid only for radix 0 or 10, asserted below); otherwise call the
  // NumberParseInt VM function.

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register radix = allocator.useRegister(masm, radixId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());

#ifdef DEBUG
  Label ok;
  masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
  masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
  masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
  masm.bind(&ok);
#endif

  // Discard the stack to ensure it's balanced when we skip the vm-call.
  allocator.discardStack(masm);

  // Use indexed value as fast path if possible.
  Label vmCall, done;
  masm.loadStringIndexValue(str, scratch, &vmCall);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, callvm.outputValueReg());
  masm.jump(&done);
  {
    masm.bind(&vmCall);

    callvm.prepare();
    masm.Push(radix);
    masm.Push(str);

    using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
    callvm.call<Fn, js::NumberParseInt>();
  }
  masm.bind(&done);
  return true;
}
   2959 
bool CacheIRCompiler::emitDoubleParseIntResult(NumberOperandId numId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // parseInt on a double input: truncate to int32 when that matches parseInt's
  // semantics, otherwise take the failure path. Fails for NaN, values that
  // don't truncate to int32, and tiny non-zero values (see below).

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, numId, floatScratch1);

  // NaN (unordered with itself) fails.
  masm.branchDouble(Assembler::DoubleUnordered, floatScratch1, floatScratch1,
                    failure->label());
  masm.branchTruncateDoubleToInt32(floatScratch1, scratch, failure->label());

  Label ok;
  masm.branch32(Assembler::NotEqual, scratch, Imm32(0), &ok);
  {
    // Accept both +0 and -0 and return 0.
    masm.loadConstantDouble(0.0, floatScratch2);
    masm.branchDouble(Assembler::DoubleEqual, floatScratch1, floatScratch2,
                      &ok);

    // Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
    masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, floatScratch2);
    masm.branchDouble(Assembler::DoubleLessThan, floatScratch1, floatScratch2,
                      failure->label());
  }
  masm.bind(&ok);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   2997 
bool CacheIRCompiler::emitStringToAtom(StringOperandId stringId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Replace the string operand in-place with its atom form: already-atomized
  // strings pass through, then a fast-path atomize is attempted, and finally
  // an ABI call to AtomizeStringNoGC. Fails if the call returns null.
  Register str = allocator.useRegister(masm, stringId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done, vmCall;
  // Already an atom: nothing to do.
  masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::ATOM_BIT), &done);

  masm.tryFastAtomize(str, scratch, str, &vmCall);
  masm.jump(&done);

  masm.bind(&vmCall);
  // Slow path: ABI call, preserving all volatile registers.
  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  using Fn = JSAtom* (*)(JSContext * cx, JSString * str);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(str);
  masm.callWithABI<Fn, jit::AtomizeStringNoGC>();
  masm.storeCallPointerResult(scratch);

  // Keep the call result live across register restoration.
  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  // A null result signals failure — bail out.
  masm.branchPtr(Assembler::Equal, scratch, Imm32(0), failure->label());
  masm.movePtr(scratch.get(), str);

  masm.bind(&done);
  return true;
}
   3037 
bool CacheIRCompiler::emitBooleanToNumber(BooleanOperandId booleanId,
                                          NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register boolean = allocator.useRegister(masm, booleanId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  // The boolean register holds 0 or 1, so tagging it as an int32 yields the
  // numeric Value directly.
  masm.tagValue(JSVAL_TYPE_INT32, boolean, output);
  return true;
}
   3046 
bool CacheIRCompiler::emitGuardStringToIndex(StringOperandId strId,
                                             Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Convert a string operand to an array index: the string's cached index
  // value is the fast path; otherwise an ABI call to GetIndexFromString.
  // Fails if the string is not an index.
  Register str = allocator.useRegister(masm, strId);
  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label vmCall, done;
  masm.loadStringIndexValue(str, output, &vmCall);
  masm.jump(&done);

  {
    masm.bind(&vmCall);
    LiveRegisterSet save = liveVolatileRegs();
    masm.PushRegsInMask(save);

    using Fn = int32_t (*)(JSString* str);
    masm.setupUnalignedABICall(output);
    masm.passABIArg(str);
    masm.callWithABI<Fn, GetIndexFromString>();
    masm.storeCallInt32Result(output);

    // Keep the call result live across register restoration.
    LiveRegisterSet ignore;
    ignore.add(output);
    masm.PopRegsInMaskIgnore(save, ignore);

    // GetIndexFromString returns a negative value on failure.
    masm.branchTest32(Assembler::Signed, output, output, failure->label());
  }

  masm.bind(&done);
  return true;
}
   3084 
bool CacheIRCompiler::emitLoadProto(ObjOperandId objId, ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Load |obj|'s prototype object into |resultId|. The caller must have
  // guaranteed the proto is neither null nor lazy (debug-asserted below).
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.loadObjProto(obj, reg);

#ifdef DEBUG
  // We shouldn't encounter a null or lazy proto.
  MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);

  // Valid proto pointers are > 1 (0 = null, 1 = lazy).
  Label done;
  masm.branchPtr(Assembler::Above, reg, ImmWord(1), &done);
  masm.assumeUnreachable("Unexpected null or lazy proto in CacheIR LoadProto");
  masm.bind(&done);
#endif
  return true;
}
   3102 
bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
                                                   ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Load an environment object's enclosing environment (stored as an object
  // Value in a fixed slot) into |resultId|.
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);
  masm.unboxObject(
      Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
  return true;
}
   3112 
bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
                                            ObjOperandId resultId,
                                            bool fallible) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Load a wrapper (proxy) object's target from its private reserved slot.
  // When |fallible| is true, a non-object private value takes the failure
  // path; otherwise the slot is unboxed unconditionally.
  Register obj = allocator.useRegister(masm, objId);
  Register reg = allocator.defineRegister(masm, resultId);

  // |failure| is only initialized (and only used) on the fallible path.
  FailurePath* failure;
  if (fallible && !addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);

  Address targetAddr(reg,
                     js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
  if (fallible) {
    masm.fallibleUnboxObject(targetAddr, reg, failure->label());
  } else {
    masm.unboxObject(targetAddr, reg);
  }

  return true;
}
   3137 
bool CacheIRCompiler::emitLoadValueTag(ValOperandId valId,
                                       ValueTagOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Extract a Value's type tag into |resultId|.
  ValueOperand val = allocator.useValueRegister(masm, valId);
  Register res = allocator.defineRegister(masm, resultId);

  // extractTag may return either |res| or another register already holding
  // the tag; only copy when it isn't already in |res|.
  Register tag = masm.extractTag(val, res);
  if (tag != res) {
    masm.mov(tag, res);
  }
  return true;
}
   3150 
bool CacheIRCompiler::emitLoadDOMExpandoValue(ObjOperandId objId,
                                              ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Load a DOM proxy's expando Value from its private reserved slot into the
  // result register.
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.defineValueRegister(masm, resultId);

  // Use the result's scratch register to hold the reserved-slots pointer.
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
               val.scratchReg());
  masm.loadValue(Address(val.scratchReg(),
                         js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
                 val);
  return true;
}
   3164 
bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration(
    ObjOperandId objId, ValOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Load a DOM proxy's expando Value via its ExpandoAndGeneration*, without
  // checking the generation counter.
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  // Determine the expando's Address.
  Register scratch = output.scratchReg();
  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address expandoAddr(scratch,
                      js::detail::ProxyReservedSlots::offsetOfPrivateSlot());

#ifdef DEBUG
  // Private values are stored as doubles, so assert we have a double.
  Label ok;
  masm.branchTestDouble(Assembler::Equal, expandoAddr, &ok);
  masm.assumeUnreachable("DOM expando is not a PrivateValue!");
  masm.bind(&ok);
#endif

  // Load the ExpandoAndGeneration* from the PrivateValue.
  masm.loadPrivate(expandoAddr, scratch);

  // Load expandoAndGeneration->expando into the output Value register.
  masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
                 output);
  return true;
}
   3193 
bool CacheIRCompiler::emitLoadUndefinedResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Set the IC output register to the constant |undefined| Value.
  AutoOutputRegister output(*this);
  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
   3200 
   3201 static void EmitStoreBoolean(MacroAssembler& masm, bool b,
   3202                             const AutoOutputRegister& output) {
   3203  if (output.hasValue()) {
   3204    Value val = BooleanValue(b);
   3205    masm.moveValue(val, output.valueReg());
   3206  } else {
   3207    MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
   3208    masm.movePtr(ImmWord(b), output.typedReg().gpr());
   3209  }
   3210 }
   3211 
bool CacheIRCompiler::emitLoadBooleanResult(bool val) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Set the IC output register to the constant boolean |val|.
  AutoOutputRegister output(*this);
  EmitStoreBoolean(masm, val, output);
  return true;
}
   3218 
bool CacheIRCompiler::emitLoadOperandResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Copy the Value operand |inputId| into the IC output register.
  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  masm.moveValue(input, output.valueReg());
  return true;
}
   3226 
   3227 static void EmitStoreResult(MacroAssembler& masm, Register reg,
   3228                            JSValueType type,
   3229                            const AutoOutputRegister& output) {
   3230  if (output.hasValue()) {
   3231    masm.tagValue(type, reg, output.valueReg());
   3232    return;
   3233  }
   3234  if (type == JSVAL_TYPE_INT32 && output.typedReg().isFloat()) {
   3235    masm.convertInt32ToDouble(reg, output.typedReg().fpu());
   3236    return;
   3237  }
   3238  if (type == output.type()) {
   3239    masm.mov(reg, output.typedReg().gpr());
   3240    return;
   3241  }
   3242  masm.assumeUnreachable("Should have monitored result");
   3243 }
   3244 
bool CacheIRCompiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Load an array's length as an int32 Value into the IC output register;
  // takes the failure path if the length doesn't fit in an int32.
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // scratch = obj->elements, then the length from the elements header.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
  masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);

  // Guard length fits in an int32.
  masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   3264 
   3265 bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId,
   3266                                               Int32OperandId resultId) {
   3267  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3268  Register obj = allocator.useRegister(masm, objId);
   3269  Register res = allocator.defineRegister(masm, resultId);
   3270 
   3271  FailurePath* failure;
   3272  if (!addFailurePath(&failure)) {
   3273    return false;
   3274  }
   3275 
   3276  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), res);
   3277  masm.load32(Address(res, ObjectElements::offsetOfLength()), res);
   3278 
   3279  // Guard length fits in an int32.
   3280  masm.branchTest32(Assembler::Signed, res, res, failure->label());
   3281  return true;
   3282 }
   3283 
   3284 bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId,
   3285                                          NumberOperandId rhsId) {
   3286  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3287  AutoOutputRegister output(*this);
   3288 
   3289  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
   3290  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
   3291 
   3292  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
   3293  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
   3294 
   3295  masm.addDouble(floatScratch1, floatScratch0);
   3296  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
   3297 
   3298  return true;
   3299 }
   3300 bool CacheIRCompiler::emitDoubleSubResult(NumberOperandId lhsId,
   3301                                          NumberOperandId rhsId) {
   3302  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3303  AutoOutputRegister output(*this);
   3304 
   3305  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
   3306  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
   3307 
   3308  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
   3309  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
   3310 
   3311  masm.subDouble(floatScratch1, floatScratch0);
   3312  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
   3313 
   3314  return true;
   3315 }
   3316 bool CacheIRCompiler::emitDoubleMulResult(NumberOperandId lhsId,
   3317                                          NumberOperandId rhsId) {
   3318  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3319  AutoOutputRegister output(*this);
   3320 
   3321  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
   3322  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
   3323 
   3324  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
   3325  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
   3326 
   3327  masm.mulDouble(floatScratch1, floatScratch0);
   3328  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
   3329 
   3330  return true;
   3331 }
   3332 bool CacheIRCompiler::emitDoubleDivResult(NumberOperandId lhsId,
   3333                                          NumberOperandId rhsId) {
   3334  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3335  AutoOutputRegister output(*this);
   3336 
   3337  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
   3338  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
   3339 
   3340  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
   3341  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
   3342 
   3343  masm.divDouble(floatScratch1, floatScratch0);
   3344  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
   3345 
   3346  return true;
   3347 }
// Double modulo: no inline sequence is emitted here; the operands are passed
// to js::NumberMod through an ABI call.
bool CacheIRCompiler::emitDoubleModResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  // Save volatile registers across the ABI call.
  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  using Fn = double (*)(double a, double b);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.callWithABI<Fn, js::NumberMod>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  // Restore everything except floatScratch0, which now holds the result.
  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
// Double exponentiation: the operands are passed to js::ecmaPow through an
// ABI call.
bool CacheIRCompiler::emitDoublePowResult(NumberOperandId lhsId,
                                          NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  // Save volatile registers across the ABI call.
  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.callWithABI<Fn, js::ecmaPow>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  // Restore everything except floatScratch0, which now holds the result.
  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
   3408 
   3409 bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId,
   3410                                         Int32OperandId rhsId) {
   3411  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3412  AutoOutputRegister output(*this);
   3413  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3414 
   3415  Register lhs = allocator.useRegister(masm, lhsId);
   3416  Register rhs = allocator.useRegister(masm, rhsId);
   3417 
   3418  FailurePath* failure;
   3419  if (!addFailurePath(&failure)) {
   3420    return false;
   3421  }
   3422 
   3423  masm.mov(rhs, scratch);
   3424  masm.branchAdd32(Assembler::Overflow, lhs, scratch, failure->label());
   3425  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3426 
   3427  return true;
   3428 }
   3429 bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId,
   3430                                         Int32OperandId rhsId) {
   3431  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3432  AutoOutputRegister output(*this);
   3433  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3434  Register lhs = allocator.useRegister(masm, lhsId);
   3435  Register rhs = allocator.useRegister(masm, rhsId);
   3436 
   3437  FailurePath* failure;
   3438  if (!addFailurePath(&failure)) {
   3439    return false;
   3440  }
   3441 
   3442  masm.mov(lhs, scratch);
   3443  masm.branchSub32(Assembler::Overflow, rhs, scratch, failure->label());
   3444  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3445 
   3446  return true;
   3447 }
   3448 
// Int32 multiplication. Bails out on int32 overflow and on a -0 result,
// since neither is representable as an int32 Value.
bool CacheIRCompiler::emitInt32MulResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label maybeNegZero, done;
  masm.mov(lhs, scratch);
  // Bail out on int32 overflow.
  masm.branchMul32(Assembler::Overflow, rhs, scratch, failure->label());
  // A zero product may actually be -0; check the operand signs below.
  masm.branchTest32(Assembler::Zero, scratch, scratch, &maybeNegZero);
  masm.jump(&done);

  masm.bind(&maybeNegZero);
  masm.mov(lhs, scratch2);
  // Result is -0 if exactly one of lhs or rhs is negative.
  masm.or32(rhs, scratch2);
  masm.branchTest32(Assembler::Signed, scratch2, scratch2, failure->label());

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   3479 
// Int32 division. Bails out whenever the mathematical result wouldn't be an
// int32: division by zero, INT32_MIN / -1, negative zero, and any non-zero
// remainder.
bool CacheIRCompiler::emitInt32DivResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegister rem(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Prevent division by 0.
  masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent -2147483648 / -1 (the quotient doesn't fit in an int32).
  Label notOverflow;
  masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
  masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  // Prevent negative 0 (0 / negative).
  Label notZero;
  masm.branchTest32(Assembler::NonZero, lhs, lhs, &notZero);
  masm.branchTest32(Assembler::Signed, rhs, rhs, failure->label());
  masm.bind(&notZero);

  masm.flexibleDivMod32(lhs, rhs, scratch, rem, false, liveVolatileRegs());

  // A remainder implies a double result.
  masm.branchTest32(Assembler::NonZero, rem, rem, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   3516 
// Int32 modulo. Bails out when the result isn't an int32: x % 0 (NaN),
// INT32_MIN % -1, and negative-zero results.
bool CacheIRCompiler::emitInt32ModResult(Int32OperandId lhsId,
                                         Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // x % 0 results in NaN
  masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent -2147483648 % -1.
  //
  // Traps on x86 and has undefined behavior on ARM32 (when __aeabi_idivmod is
  // called).
  Label notOverflow;
  masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
  masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  masm.flexibleRemainder32(lhs, rhs, scratch, false, liveVolatileRegs());

  // Modulo takes the sign of the dividend; we can't return negative zero here.
  Label notZero;
  masm.branchTest32(Assembler::NonZero, scratch, scratch, &notZero);
  masm.branchTest32(Assembler::Signed, lhs, lhs, failure->label());
  masm.bind(&notZero);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());

  return true;
}
   3554 
   3555 bool CacheIRCompiler::emitInt32PowResult(Int32OperandId lhsId,
   3556                                         Int32OperandId rhsId) {
   3557  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3558  AutoOutputRegister output(*this);
   3559  Register base = allocator.useRegister(masm, lhsId);
   3560  Register power = allocator.useRegister(masm, rhsId);
   3561  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
   3562  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
   3563  AutoScratchRegister scratch3(allocator, masm);
   3564 
   3565  FailurePath* failure;
   3566  if (!addFailurePath(&failure)) {
   3567    return false;
   3568  }
   3569 
   3570  masm.pow32(base, power, scratch1, scratch2, scratch3, failure->label());
   3571 
   3572  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
   3573  return true;
   3574 }
   3575 
   3576 bool CacheIRCompiler::emitInt32BitOrResult(Int32OperandId lhsId,
   3577                                           Int32OperandId rhsId) {
   3578  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3579  AutoOutputRegister output(*this);
   3580  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3581 
   3582  Register lhs = allocator.useRegister(masm, lhsId);
   3583  Register rhs = allocator.useRegister(masm, rhsId);
   3584 
   3585  masm.mov(rhs, scratch);
   3586  masm.or32(lhs, scratch);
   3587  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3588 
   3589  return true;
   3590 }
   3591 bool CacheIRCompiler::emitInt32BitXorResult(Int32OperandId lhsId,
   3592                                            Int32OperandId rhsId) {
   3593  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3594  AutoOutputRegister output(*this);
   3595  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3596 
   3597  Register lhs = allocator.useRegister(masm, lhsId);
   3598  Register rhs = allocator.useRegister(masm, rhsId);
   3599 
   3600  masm.mov(rhs, scratch);
   3601  masm.xor32(lhs, scratch);
   3602  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3603 
   3604  return true;
   3605 }
   3606 bool CacheIRCompiler::emitInt32BitAndResult(Int32OperandId lhsId,
   3607                                            Int32OperandId rhsId) {
   3608  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3609  AutoOutputRegister output(*this);
   3610  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3611 
   3612  Register lhs = allocator.useRegister(masm, lhsId);
   3613  Register rhs = allocator.useRegister(masm, rhsId);
   3614 
   3615  masm.mov(rhs, scratch);
   3616  masm.and32(lhs, scratch);
   3617  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3618 
   3619  return true;
   3620 }
   3621 bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
   3622                                               Int32OperandId rhsId) {
   3623  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3624  AutoOutputRegister output(*this);
   3625  Register lhs = allocator.useRegister(masm, lhsId);
   3626  Register rhs = allocator.useRegister(masm, rhsId);
   3627  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3628 
   3629  masm.mov(lhs, scratch);
   3630  masm.flexibleLshift32(rhs, scratch);
   3631  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3632 
   3633  return true;
   3634 }
   3635 
   3636 bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId,
   3637                                                Int32OperandId rhsId) {
   3638  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3639  AutoOutputRegister output(*this);
   3640  Register lhs = allocator.useRegister(masm, lhsId);
   3641  Register rhs = allocator.useRegister(masm, rhsId);
   3642  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3643 
   3644  masm.mov(lhs, scratch);
   3645  masm.flexibleRshift32Arithmetic(rhs, scratch);
   3646  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3647 
   3648  return true;
   3649 }
   3650 
// Unsigned right shift. The result is a uint32: if |forceDouble| it's boxed
// as a double unconditionally; otherwise we bail out when it doesn't fit in
// an int32 (sign bit set).
bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId,
                                                 Int32OperandId rhsId,
                                                 bool forceDouble) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(lhs, scratch);
  masm.flexibleRshift32(rhs, scratch);
  if (forceDouble) {
    // Interpret the shifted value as unsigned and box it as a double.
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  } else {
    // Bail out if the unsigned result has the sign bit set.
    masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  }
  return true;
}
   3678 
   3679 bool CacheIRCompiler::emitInt32NegationResult(Int32OperandId inputId) {
   3680  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3681  AutoOutputRegister output(*this);
   3682  Register val = allocator.useRegister(masm, inputId);
   3683  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3684 
   3685  FailurePath* failure;
   3686  if (!addFailurePath(&failure)) {
   3687    return false;
   3688  }
   3689 
   3690  // Guard against 0 and MIN_INT by checking if low 31-bits are all zero.
   3691  // Both of these result in a double.
   3692  masm.branchTest32(Assembler::Zero, val, Imm32(0x7fffffff), failure->label());
   3693  masm.mov(val, scratch);
   3694  masm.neg32(scratch);
   3695  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3696  return true;
   3697 }
   3698 
   3699 bool CacheIRCompiler::emitInt32IncResult(Int32OperandId inputId) {
   3700  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3701  AutoOutputRegister output(*this);
   3702  Register input = allocator.useRegister(masm, inputId);
   3703  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3704 
   3705  FailurePath* failure;
   3706  if (!addFailurePath(&failure)) {
   3707    return false;
   3708  }
   3709 
   3710  masm.mov(input, scratch);
   3711  masm.branchAdd32(Assembler::Overflow, Imm32(1), scratch, failure->label());
   3712  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3713 
   3714  return true;
   3715 }
   3716 
   3717 bool CacheIRCompiler::emitInt32DecResult(Int32OperandId inputId) {
   3718  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3719  AutoOutputRegister output(*this);
   3720  Register input = allocator.useRegister(masm, inputId);
   3721  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3722 
   3723  FailurePath* failure;
   3724  if (!addFailurePath(&failure)) {
   3725    return false;
   3726  }
   3727 
   3728  masm.mov(input, scratch);
   3729  masm.branchSub32(Assembler::Overflow, Imm32(1), scratch, failure->label());
   3730  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3731 
   3732  return true;
   3733 }
   3734 
   3735 bool CacheIRCompiler::emitInt32NotResult(Int32OperandId inputId) {
   3736  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3737  AutoOutputRegister output(*this);
   3738  Register val = allocator.useRegister(masm, inputId);
   3739  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   3740 
   3741  masm.mov(val, scratch);
   3742  masm.not32(scratch);
   3743  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   3744  return true;
   3745 }
   3746 
   3747 bool CacheIRCompiler::emitDoubleNegationResult(NumberOperandId inputId) {
   3748  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3749  AutoOutputRegister output(*this);
   3750 
   3751  AutoScratchFloatRegister floatReg(this);
   3752 
   3753  allocator.ensureDoubleRegister(masm, inputId, floatReg);
   3754 
   3755  masm.negateDouble(floatReg);
   3756  masm.boxDouble(floatReg, output.valueReg(), floatReg);
   3757 
   3758  return true;
   3759 }
   3760 
   3761 bool CacheIRCompiler::emitDoubleIncDecResult(bool isInc,
   3762                                             NumberOperandId inputId) {
   3763  AutoOutputRegister output(*this);
   3764 
   3765  AutoScratchFloatRegister floatReg(this);
   3766 
   3767  allocator.ensureDoubleRegister(masm, inputId, floatReg);
   3768 
   3769  {
   3770    ScratchDoubleScope fpscratch(masm);
   3771    masm.loadConstantDouble(1.0, fpscratch);
   3772    if (isInc) {
   3773      masm.addDouble(fpscratch, floatReg);
   3774    } else {
   3775      masm.subDouble(fpscratch, floatReg);
   3776    }
   3777  }
   3778  masm.boxDouble(floatReg, output.valueReg(), floatReg);
   3779 
   3780  return true;
   3781 }
   3782 
bool CacheIRCompiler::emitDoubleIncResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Shared helper adds 1.0 when the first argument is true.
  return emitDoubleIncDecResult(true, inputId);
}
   3787 
bool CacheIRCompiler::emitDoubleDecResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Shared helper subtracts 1.0 when the first argument is false.
  return emitDoubleIncDecResult(false, inputId);
}
   3792 
// Shared code for BigInt binary operators: pushes both operands and performs
// a VM call to |fn| via AutoCallVM.
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
                                                      BigIntOperandId rhsId) {
  AutoCallVM callvm(masm, this, allocator);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  // Push in reverse argument order: rhs first, lhs on top.
  masm.Push(rhs);
  masm.Push(lhs);

  callvm.call<Fn, fn>();
  return true;
}
   3808 
bool CacheIRCompiler::emitBigIntAddResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt addition is a VM call to BigInt::add.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::add>(lhsId, rhsId);
}
   3815 
bool CacheIRCompiler::emitBigIntSubResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt subtraction is a VM call to BigInt::sub.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::sub>(lhsId, rhsId);
}
   3822 
bool CacheIRCompiler::emitBigIntMulResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt multiplication is a VM call to BigInt::mul.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::mul>(lhsId, rhsId);
}
   3829 
bool CacheIRCompiler::emitBigIntDivResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt division is a VM call to BigInt::div.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::div>(lhsId, rhsId);
}
   3836 
bool CacheIRCompiler::emitBigIntModResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt modulo is a VM call to BigInt::mod.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::mod>(lhsId, rhsId);
}
   3843 
bool CacheIRCompiler::emitBigIntPowResult(BigIntOperandId lhsId,
                                          BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt exponentiation is a VM call to BigInt::pow.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::pow>(lhsId, rhsId);
}
   3850 
bool CacheIRCompiler::emitBigIntBitAndResult(BigIntOperandId lhsId,
                                             BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt bitwise-and is a VM call to BigInt::bitAnd.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitAnd>(lhsId, rhsId);
}
   3857 
bool CacheIRCompiler::emitBigIntBitOrResult(BigIntOperandId lhsId,
                                            BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt bitwise-or is a VM call to BigInt::bitOr.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitOr>(lhsId, rhsId);
}
   3864 
bool CacheIRCompiler::emitBigIntBitXorResult(BigIntOperandId lhsId,
                                             BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt bitwise-xor is a VM call to BigInt::bitXor.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::bitXor>(lhsId, rhsId);
}
   3871 
bool CacheIRCompiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId,
                                                BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt left shift is a VM call to BigInt::lsh.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::lsh>(lhsId, rhsId);
}
   3878 
bool CacheIRCompiler::emitBigIntRightShiftResult(BigIntOperandId lhsId,
                                                 BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt right shift is a VM call to BigInt::rsh.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
  return emitBigIntBinaryOperationShared<Fn, BigInt::rsh>(lhsId, rhsId);
}
   3885 
// Shared code for BigInt unary operators: pushes the operand and performs a
// VM call to |fn| via AutoCallVM.
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntUnaryOperationShared(BigIntOperandId inputId) {
  AutoCallVM callvm(masm, this, allocator);
  Register val = allocator.useRegister(masm, inputId);

  callvm.prepare();

  masm.Push(val);

  callvm.call<Fn, fn>();
  return true;
}
   3898 
bool CacheIRCompiler::emitBigIntNotResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt bitwise-not is a VM call to BigInt::bitNot.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::bitNot>(inputId);
}
   3904 
bool CacheIRCompiler::emitBigIntNegationResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt negation is a VM call to BigInt::neg.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::neg>(inputId);
}
   3910 
bool CacheIRCompiler::emitBigIntIncResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt increment is a VM call to BigInt::inc.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::inc>(inputId);
}
   3916 
bool CacheIRCompiler::emitBigIntDecResult(BigIntOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // BigInt decrement is a VM call to BigInt::dec.
  using Fn = BigInt* (*)(JSContext*, HandleBigInt);
  return emitBigIntUnaryOperationShared<Fn, BigInt::dec>(inputId);
}
   3922 
   3923 bool CacheIRCompiler::emitBigIntToIntPtr(BigIntOperandId inputId,
   3924                                         IntPtrOperandId resultId) {
   3925  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   3926 
   3927  Register input = allocator.useRegister(masm, inputId);
   3928  Register output = allocator.defineRegister(masm, resultId);
   3929 
   3930  FailurePath* failure;
   3931  if (!addFailurePath(&failure)) {
   3932    return false;
   3933  }
   3934 
   3935  masm.loadBigIntPtr(input, output, failure->label());
   3936  return true;
   3937 }
   3938 
   3939 static gc::Heap InitialBigIntHeap(JSContext* cx) {
   3940  JS::Zone* zone = cx->zone();
   3941  return zone->allocNurseryBigInts() ? gc::Heap::Default : gc::Heap::Tenured;
   3942 }
   3943 
// Allocate a BigInt cell into |result|, first inline via newGCBigInt and, if
// that fails, through an ABI call to jit::AllocateBigIntNoGC. Jumps to |fail|
// when both paths fail. |liveSet| is saved/restored around the fallback call;
// |temp| is clobbered.
static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
                               Register temp, const LiveRegisterSet& liveSet,
                               gc::Heap initialHeap, Label* fail) {
  Label fallback, done;
  masm.newGCBigInt(result, temp, initialHeap, &fallback);
  masm.jump(&done);
  {
    masm.bind(&fallback);

    // Request a minor collection at a later time if nursery allocation failed.
    bool requestMinorGC = initialHeap == gc::Heap::Default;

    masm.PushRegsInMask(liveSet);
    using Fn = void* (*)(JSContext * cx, bool requestMinorGC);
    masm.setupUnalignedABICall(temp);
    masm.loadJSContext(temp);
    masm.passABIArg(temp);
    masm.move32(Imm32(requestMinorGC), result);
    masm.passABIArg(result);
    masm.callWithABI<Fn, jit::AllocateBigIntNoGC>();
    masm.storeCallPointerResult(result);

    masm.PopRegsInMask(liveSet);
    // A null pointer means the fallback allocation failed too.
    masm.branchPtr(Assembler::Equal, result, ImmWord(0), fail);
  }
  masm.bind(&done);
}
   3971 
// Box an intptr-sized integer as a freshly allocated BigInt.
bool CacheIRCompiler::emitIntPtrToBigIntResult(IntPtrOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register input = allocator.useRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Don't bother saving registers we're about to clobber anyway.
  LiveRegisterSet save = liveVolatileRegs();
  save.takeUnchecked(scratch1);
  save.takeUnchecked(scratch2);
  save.takeUnchecked(output);

  // Allocate a new BigInt. The code after this must be infallible.
  gc::Heap initialHeap = InitialBigIntHeap(cx_);
  EmitAllocateBigInt(masm, scratch1, scratch2, save, initialHeap,
                     failure->label());

  // Initialize the new BigInt (in scratch1) with the intptr value.
  masm.movePtr(input, scratch2);
  masm.initializeBigIntPtr(scratch1, scratch2);

  masm.tagValue(JSVAL_TYPE_BIGINT, scratch1, output.valueReg());
  return true;
}
   4001 
   4002 bool CacheIRCompiler::emitBigIntPtrAdd(IntPtrOperandId lhsId,
   4003                                       IntPtrOperandId rhsId,
   4004                                       IntPtrOperandId resultId) {
   4005  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4006 
   4007  Register lhs = allocator.useRegister(masm, lhsId);
   4008  Register rhs = allocator.useRegister(masm, rhsId);
   4009  Register output = allocator.defineRegister(masm, resultId);
   4010 
   4011  FailurePath* failure;
   4012  if (!addFailurePath(&failure)) {
   4013    return false;
   4014  }
   4015 
   4016  masm.movePtr(rhs, output);
   4017  masm.branchAddPtr(Assembler::Overflow, lhs, output, failure->label());
   4018  return true;
   4019 }
   4020 
   4021 bool CacheIRCompiler::emitBigIntPtrSub(IntPtrOperandId lhsId,
   4022                                       IntPtrOperandId rhsId,
   4023                                       IntPtrOperandId resultId) {
   4024  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4025 
   4026  Register lhs = allocator.useRegister(masm, lhsId);
   4027  Register rhs = allocator.useRegister(masm, rhsId);
   4028  Register output = allocator.defineRegister(masm, resultId);
   4029 
   4030  FailurePath* failure;
   4031  if (!addFailurePath(&failure)) {
   4032    return false;
   4033  }
   4034 
   4035  masm.movePtr(lhs, output);
   4036  masm.branchSubPtr(Assembler::Overflow, rhs, output, failure->label());
   4037  return true;
   4038 }
   4039 
   4040 bool CacheIRCompiler::emitBigIntPtrMul(IntPtrOperandId lhsId,
   4041                                       IntPtrOperandId rhsId,
   4042                                       IntPtrOperandId resultId) {
   4043  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4044 
   4045  Register lhs = allocator.useRegister(masm, lhsId);
   4046  Register rhs = allocator.useRegister(masm, rhsId);
   4047  Register output = allocator.defineRegister(masm, resultId);
   4048 
   4049  FailurePath* failure;
   4050  if (!addFailurePath(&failure)) {
   4051    return false;
   4052  }
   4053 
   4054  masm.movePtr(rhs, output);
   4055  masm.branchMulPtr(Assembler::Overflow, lhs, output, failure->label());
   4056  return true;
   4057 }
   4058 
bool CacheIRCompiler::emitBigIntPtrDiv(IntPtrOperandId lhsId,
                                       IntPtrOperandId rhsId,
                                       IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Signed division of two pointer-sized BigInt digits. Division by zero
  // and the single overflowing case (INTPTR_MIN / -1) bail out.
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Smallest representable signed value of digit size, i.e. INTPTR_MIN.
  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Prevent division by 0.
  masm.branchTestPtr(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent INTPTR_MIN / -1.
  Label notOverflow;
  masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::Equal, rhs, Imm32(-1), failure->label());
  masm.bind(&notOverflow);

  // The division helper may clobber volatile registers internally; the
  // |false| flag presumably selects signed division (matches Mod below) —
  // confirm against MacroAssembler::flexibleQuotientPtr.
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.flexibleQuotientPtr(lhs, rhs, output, false, volatileRegs);
  return true;
}
   4090 
bool CacheIRCompiler::emitBigIntPtrMod(IntPtrOperandId lhsId,
                                       IntPtrOperandId rhsId,
                                       IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Signed remainder of two pointer-sized BigInt digits. Division by zero
  // bails out; INTPTR_MIN % -1 is handled inline with result 0.
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  Register output = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Smallest representable signed value of digit size, i.e. INTPTR_MIN.
  static constexpr auto DigitMin = std::numeric_limits<
      mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();

  // Prevent division by 0.
  masm.branchTestPtr(Assembler::Zero, rhs, rhs, failure->label());

  // Prevent INTPTR_MIN / -1.  Unlike Div, this case doesn't fail: the
  // remainder is mathematically 0, so store 0 and skip the division.
  Label notOverflow, done;
  masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(DigitMin), &notOverflow);
  masm.branchPtr(Assembler::NotEqual, rhs, Imm32(-1), &notOverflow);
  masm.movePtr(ImmWord(0), output);
  masm.jump(&done);
  masm.bind(&notOverflow);

  // The remainder helper may clobber volatile registers internally; the
  // |false| flag presumably selects signed remainder (matches Div above).
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.flexibleRemainderPtr(lhs, rhs, output, false, volatileRegs);
  masm.bind(&done);
  return true;
}
   4125 
   4126 bool CacheIRCompiler::emitBigIntPtrPow(IntPtrOperandId lhsId,
   4127                                       IntPtrOperandId rhsId,
   4128                                       IntPtrOperandId resultId) {
   4129  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4130 
   4131  Register lhs = allocator.useRegister(masm, lhsId);
   4132  Register rhs = allocator.useRegister(masm, rhsId);
   4133  Register output = allocator.defineRegister(masm, resultId);
   4134  AutoScratchRegister scratch1(allocator, masm);
   4135  AutoScratchRegister scratch2(allocator, masm);
   4136 
   4137  FailurePath* failure;
   4138  if (!addFailurePath(&failure)) {
   4139    return false;
   4140  }
   4141 
   4142  masm.powPtr(lhs, rhs, output, scratch1, scratch2, failure->label());
   4143  return true;
   4144 }
   4145 
   4146 bool CacheIRCompiler::emitBigIntPtrBitOr(IntPtrOperandId lhsId,
   4147                                         IntPtrOperandId rhsId,
   4148                                         IntPtrOperandId resultId) {
   4149  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4150 
   4151  Register lhs = allocator.useRegister(masm, lhsId);
   4152  Register rhs = allocator.useRegister(masm, rhsId);
   4153  Register output = allocator.defineRegister(masm, resultId);
   4154 
   4155  masm.movePtr(rhs, output);
   4156  masm.orPtr(lhs, output);
   4157  return true;
   4158 }
   4159 
   4160 bool CacheIRCompiler::emitBigIntPtrBitXor(IntPtrOperandId lhsId,
   4161                                          IntPtrOperandId rhsId,
   4162                                          IntPtrOperandId resultId) {
   4163  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4164 
   4165  Register lhs = allocator.useRegister(masm, lhsId);
   4166  Register rhs = allocator.useRegister(masm, rhsId);
   4167  Register output = allocator.defineRegister(masm, resultId);
   4168 
   4169  masm.movePtr(rhs, output);
   4170  masm.xorPtr(lhs, output);
   4171  return true;
   4172 }
   4173 
   4174 bool CacheIRCompiler::emitBigIntPtrBitAnd(IntPtrOperandId lhsId,
   4175                                          IntPtrOperandId rhsId,
   4176                                          IntPtrOperandId resultId) {
   4177  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4178 
   4179  Register lhs = allocator.useRegister(masm, lhsId);
   4180  Register rhs = allocator.useRegister(masm, rhsId);
   4181  Register output = allocator.defineRegister(masm, resultId);
   4182 
   4183  masm.movePtr(rhs, output);
   4184  masm.andPtr(lhs, output);
   4185  return true;
   4186 }
   4187 
bool CacheIRCompiler::emitBigIntPtrLeftShift(IntPtrOperandId lhsId,
                                             IntPtrOperandId rhsId,
                                             IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Left-shift of a pointer-sized BigInt digit. The shift amount |rhs| is a
  // signed value: negative amounts shift right instead. Results that don't
  // fit a single digit bail out.
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  Register output = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;

  masm.movePtr(lhs, output);

  // 0n << x == 0n
  masm.branchPtr(Assembler::Equal, lhs, Imm32(0), &done);

  // x << DigitBits with x != 0n always exceeds pointer-sized storage.
  masm.branchPtr(Assembler::GreaterThanOrEqual, rhs, Imm32(BigInt::DigitBits),
                 failure->label());

  // x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
  // An arithmetic shift by DigitBits-1 produces exactly 0 (for non-negative
  // x) or -1 (for negative x).
  Label shift;
  masm.branchPtr(Assembler::GreaterThan, rhs,
                 Imm32(-int32_t(BigInt::DigitBits)), &shift);
  {
    masm.rshiftPtrArithmetic(Imm32(BigInt::DigitBits - 1), output);
    masm.jump(&done);
  }
  masm.bind(&shift);

  // |x << -y| is computed as |x >> y|.
  Label leftShift;
  masm.branchPtr(Assembler::GreaterThanOrEqual, rhs, Imm32(0), &leftShift);
  {
    masm.movePtr(rhs, scratch);
    masm.negPtr(scratch);
    masm.flexibleRshiftPtrArithmetic(scratch, output);
    masm.jump(&done);
  }
  masm.bind(&leftShift);

  masm.flexibleLshiftPtr(rhs, output);

  // Check for overflow: ((lhs << rhs) >> rhs) == lhs.
  masm.movePtr(output, scratch);
  masm.flexibleRshiftPtrArithmetic(rhs, scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, lhs, failure->label());

  masm.bind(&done);
  return true;
}
   4245 
bool CacheIRCompiler::emitBigIntPtrRightShift(IntPtrOperandId lhsId,
                                              IntPtrOperandId rhsId,
                                              IntPtrOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Right-shift of a pointer-sized BigInt digit. The shift amount |rhs| is a
  // signed value: negative amounts shift left instead (and must then be
  // checked for overflow).
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);
  Register output = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;

  masm.movePtr(lhs, output);

  // 0n >> x == 0n
  masm.branchPtr(Assembler::Equal, lhs, Imm32(0), &done);

  // x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
  masm.branchPtr(Assembler::LessThanOrEqual, rhs,
                 Imm32(-int32_t(BigInt::DigitBits)), failure->label());

  // x >> DigitBits is either 0n or -1n.
  // An arithmetic shift by DigitBits-1 produces exactly 0 (for non-negative
  // x) or -1 (for negative x).
  Label shift;
  masm.branchPtr(Assembler::LessThan, rhs, Imm32(BigInt::DigitBits), &shift);
  {
    masm.rshiftPtrArithmetic(Imm32(BigInt::DigitBits - 1), output);
    masm.jump(&done);
  }
  masm.bind(&shift);

  // |x >> -y| is computed as |x << y|.
  Label rightShift;
  masm.branchPtr(Assembler::GreaterThanOrEqual, rhs, Imm32(0), &rightShift);
  {
    masm.movePtr(rhs, scratch1);
    masm.negPtr(scratch1);
    masm.flexibleLshiftPtr(scratch1, output);

    // Check for overflow: ((lhs << rhs) >> rhs) == lhs.
    masm.movePtr(output, scratch2);
    masm.flexibleRshiftPtrArithmetic(scratch1, scratch2);
    masm.branchPtr(Assembler::NotEqual, scratch2, lhs, failure->label());

    masm.jump(&done);
  }
  masm.bind(&rightShift);

  masm.flexibleRshiftPtrArithmetic(rhs, output);

  masm.bind(&done);
  return true;
}
   4304 
   4305 bool CacheIRCompiler::emitBigIntPtrNegation(IntPtrOperandId inputId,
   4306                                            IntPtrOperandId resultId) {
   4307  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4308 
   4309  Register input = allocator.useRegister(masm, inputId);
   4310  Register output = allocator.defineRegister(masm, resultId);
   4311 
   4312  FailurePath* failure;
   4313  if (!addFailurePath(&failure)) {
   4314    return false;
   4315  }
   4316 
   4317  masm.movePtr(input, output);
   4318  masm.branchNegPtr(Assembler::Overflow, output, failure->label());
   4319  return true;
   4320 }
   4321 
   4322 bool CacheIRCompiler::emitBigIntPtrInc(IntPtrOperandId inputId,
   4323                                       IntPtrOperandId resultId) {
   4324  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4325 
   4326  Register input = allocator.useRegister(masm, inputId);
   4327  Register output = allocator.defineRegister(masm, resultId);
   4328 
   4329  FailurePath* failure;
   4330  if (!addFailurePath(&failure)) {
   4331    return false;
   4332  }
   4333 
   4334  masm.movePtr(input, output);
   4335  masm.branchAddPtr(Assembler::Overflow, Imm32(1), output, failure->label());
   4336  return true;
   4337 }
   4338 
   4339 bool CacheIRCompiler::emitBigIntPtrDec(IntPtrOperandId inputId,
   4340                                       IntPtrOperandId resultId) {
   4341  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4342 
   4343  Register input = allocator.useRegister(masm, inputId);
   4344  Register output = allocator.defineRegister(masm, resultId);
   4345 
   4346  FailurePath* failure;
   4347  if (!addFailurePath(&failure)) {
   4348    return false;
   4349  }
   4350 
   4351  masm.movePtr(input, output);
   4352  masm.branchSubPtr(Assembler::Overflow, Imm32(1), output, failure->label());
   4353  return true;
   4354 }
   4355 
   4356 bool CacheIRCompiler::emitBigIntPtrNot(IntPtrOperandId inputId,
   4357                                       IntPtrOperandId resultId) {
   4358  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4359 
   4360  Register input = allocator.useRegister(masm, inputId);
   4361  Register output = allocator.defineRegister(masm, resultId);
   4362 
   4363  masm.movePtr(input, output);
   4364  masm.notPtr(output);
   4365  return true;
   4366 }
   4367 
   4368 bool CacheIRCompiler::emitTruncateDoubleToUInt32(NumberOperandId inputId,
   4369                                                 Int32OperandId resultId) {
   4370  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4371  Register res = allocator.defineRegister(masm, resultId);
   4372 
   4373  AutoScratchFloatRegister floatReg(this);
   4374 
   4375  allocator.ensureDoubleRegister(masm, inputId, floatReg);
   4376 
   4377  TruncateDoubleModUint32(masm, floatReg, res, liveVolatileRegs());
   4378  return true;
   4379 }
   4380 
   4381 bool CacheIRCompiler::emitDoubleToUint8Clamped(NumberOperandId inputId,
   4382                                               Int32OperandId resultId) {
   4383  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4384  Register res = allocator.defineRegister(masm, resultId);
   4385 
   4386  AutoScratchFloatRegister floatReg(this);
   4387 
   4388  allocator.ensureDoubleRegister(masm, inputId, floatReg);
   4389 
   4390  masm.clampDoubleToUint8(floatReg, res);
   4391  return true;
   4392 }
   4393 
   4394 bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult(ObjOperandId objId) {
   4395  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4396  AutoOutputRegister output(*this);
   4397  Register obj = allocator.useRegister(masm, objId);
   4398  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   4399 
   4400  FailurePath* failure;
   4401  if (!addFailurePath(&failure)) {
   4402    return false;
   4403  }
   4404 
   4405  masm.loadArgumentsObjectLength(obj, scratch, failure->label());
   4406 
   4407  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   4408  return true;
   4409 }
   4410 
   4411 bool CacheIRCompiler::emitLoadArgumentsObjectLength(ObjOperandId objId,
   4412                                                    Int32OperandId resultId) {
   4413  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4414  Register obj = allocator.useRegister(masm, objId);
   4415  Register res = allocator.defineRegister(masm, resultId);
   4416 
   4417  FailurePath* failure;
   4418  if (!addFailurePath(&failure)) {
   4419    return false;
   4420  }
   4421 
   4422  masm.loadArgumentsObjectLength(obj, res, failure->label());
   4423  return true;
   4424 }
   4425 
   4426 bool CacheIRCompiler::emitLoadArrayBufferByteLengthInt32Result(
   4427    ObjOperandId objId) {
   4428  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4429  AutoOutputRegister output(*this);
   4430  Register obj = allocator.useRegister(masm, objId);
   4431  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   4432 
   4433  FailurePath* failure;
   4434  if (!addFailurePath(&failure)) {
   4435    return false;
   4436  }
   4437 
   4438  masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
   4439  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
   4440  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   4441  return true;
   4442 }
   4443 
   4444 bool CacheIRCompiler::emitLoadArrayBufferByteLengthDoubleResult(
   4445    ObjOperandId objId) {
   4446  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4447  AutoOutputRegister output(*this);
   4448  Register obj = allocator.useRegister(masm, objId);
   4449  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   4450 
   4451  ScratchDoubleScope fpscratch(masm);
   4452  masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
   4453  masm.convertIntPtrToDouble(scratch, fpscratch);
   4454  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
   4455  return true;
   4456 }
   4457 
   4458 bool CacheIRCompiler::emitLoadArrayBufferViewLengthInt32Result(
   4459    ObjOperandId objId) {
   4460  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4461  AutoOutputRegister output(*this);
   4462  Register obj = allocator.useRegister(masm, objId);
   4463  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   4464 
   4465  FailurePath* failure;
   4466  if (!addFailurePath(&failure)) {
   4467    return false;
   4468  }
   4469 
   4470  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
   4471  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
   4472  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   4473  return true;
   4474 }
   4475 
   4476 bool CacheIRCompiler::emitLoadArrayBufferViewLengthDoubleResult(
   4477    ObjOperandId objId) {
   4478  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4479  AutoOutputRegister output(*this);
   4480  Register obj = allocator.useRegister(masm, objId);
   4481  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   4482 
   4483  ScratchDoubleScope fpscratch(masm);
   4484  masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
   4485  masm.convertIntPtrToDouble(scratch, fpscratch);
   4486  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
   4487  return true;
   4488 }
   4489 
   4490 bool CacheIRCompiler::emitLoadArrayBufferViewLength(ObjOperandId objId,
   4491                                                    IntPtrOperandId resultId) {
   4492  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4493  Register obj = allocator.useRegister(masm, objId);
   4494  Register result = allocator.defineRegister(masm, resultId);
   4495 
   4496  masm.loadArrayBufferViewLengthIntPtr(obj, result);
   4497  return true;
   4498 }
   4499 
   4500 bool CacheIRCompiler::emitLoadBoundFunctionNumArgs(ObjOperandId objId,
   4501                                                   Int32OperandId resultId) {
   4502  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4503 
   4504  Register obj = allocator.useRegister(masm, objId);
   4505  Register output = allocator.defineRegister(masm, resultId);
   4506 
   4507  masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
   4508                  output);
   4509  masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
   4510  return true;
   4511 }
   4512 
   4513 bool CacheIRCompiler::emitLoadBoundFunctionTarget(ObjOperandId objId,
   4514                                                  ObjOperandId resultId) {
   4515  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4516 
   4517  Register obj = allocator.useRegister(masm, objId);
   4518  Register output = allocator.defineRegister(masm, resultId);
   4519 
   4520  masm.unboxObject(Address(obj, BoundFunctionObject::offsetOfTargetSlot()),
   4521                   output);
   4522  return true;
   4523 }
   4524 
   4525 bool CacheIRCompiler::emitLoadBoundFunctionArgument(ObjOperandId objId,
   4526                                                    uint32_t index,
   4527                                                    ValOperandId resultId) {
   4528  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4529 
   4530  Register obj = allocator.useRegister(masm, objId);
   4531  ValueOperand output = allocator.defineValueRegister(masm, resultId);
   4532  AutoScratchRegister scratch(allocator, masm);
   4533 
   4534  constexpr size_t inlineArgsOffset =
   4535      BoundFunctionObject::offsetOfFirstInlineBoundArg();
   4536 
   4537  masm.unboxObject(Address(obj, inlineArgsOffset), scratch);
   4538  masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()), scratch);
   4539  masm.loadValue(Address(scratch, index * sizeof(Value)), output);
   4540  return true;
   4541 }
   4542 
   4543 bool CacheIRCompiler::emitGuardBoundFunctionIsConstructor(ObjOperandId objId) {
   4544  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4545 
   4546  Register obj = allocator.useRegister(masm, objId);
   4547 
   4548  FailurePath* failure;
   4549  if (!addFailurePath(&failure)) {
   4550    return false;
   4551  }
   4552 
   4553  Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
   4554  masm.branchTest32(Assembler::Zero, flagsSlot,
   4555                    Imm32(BoundFunctionObject::IsConstructorFlag),
   4556                    failure->label());
   4557  return true;
   4558 }
   4559 
   4560 bool CacheIRCompiler::emitGuardObjectIdentity(ObjOperandId obj1Id,
   4561                                              ObjOperandId obj2Id) {
   4562  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4563 
   4564  Register obj1 = allocator.useRegister(masm, obj1Id);
   4565  Register obj2 = allocator.useRegister(masm, obj2Id);
   4566 
   4567  FailurePath* failure;
   4568  if (!addFailurePath(&failure)) {
   4569    return false;
   4570  }
   4571 
   4572  masm.branchPtr(Assembler::NotEqual, obj1, obj2, failure->label());
   4573  return true;
   4574 }
   4575 
bool CacheIRCompiler::emitLoadFunctionLengthResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Load a JSFunction's |length| and box it as an int32 result Value.
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Get the JSFunction flags and arg count.
  masm.load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), scratch);

  // Functions with a SelfHostedLazyScript must be compiled with the slow-path
  // before the function length is known. If the length was previously resolved,
  // the length property may be shadowed.
  masm.branchTest32(
      Assembler::NonZero, scratch,
      Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
      failure->label());

  // Note: |scratch| serves as both input (flags/argcount) and output here.
  masm.loadFunctionLength(obj, scratch, scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   4602 
   4603 bool CacheIRCompiler::emitLoadFunctionNameResult(ObjOperandId objId) {
   4604  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   4605  AutoOutputRegister output(*this);
   4606  Register obj = allocator.useRegister(masm, objId);
   4607  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   4608 
   4609  FailurePath* failure;
   4610  if (!addFailurePath(&failure)) {
   4611    return false;
   4612  }
   4613 
   4614  masm.loadFunctionName(obj, scratch, ImmGCPtr(cx_->names().empty_),
   4615                        failure->label());
   4616 
   4617  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
   4618  return true;
   4619 }
   4620 
bool CacheIRCompiler::emitBindFunctionResult(ObjOperandId targetId,
                                             uint32_t argc,
                                             uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Implement Function.prototype.bind via a VM call to
  // BoundFunctionObject::functionBindImpl. The bound arguments are copied
  // from the baseline stub frame onto the stack; |templateObjectOffset| is
  // unused on this generic path (cf. emitSpecializedBindFunctionResult).
  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());

  Register target = allocator.useRegister(masm, targetId);

  callvm.prepare();

  if (isBaseline()) {
    // Push the arguments in reverse order.
    for (uint32_t i = 0; i < argc; i++) {
      Address argAddress(FramePointer,
                         BaselineStubFrameLayout::Size() + i * sizeof(Value));
      masm.pushValue(argAddress);
    }
  } else {
    MOZ_ASSERT(argc == 0, "Call ICs not used in ion");
  }
  // Stack pointer now points at the first pushed argument; pass it as the
  // Value* args pointer.
  masm.moveStackPtrTo(scratch.get());

  // VM-call arguments, pushed in reverse of the Fn parameter order.
  masm.Push(ImmWord(0));  // nullptr for maybeBound
  masm.Push(Imm32(argc));
  masm.Push(scratch);
  masm.Push(target);

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
                                      uint32_t, Handle<BoundFunctionObject*>);
  callvm.call<Fn, BoundFunctionObject::functionBindImpl>();
  return true;
}
   4655 
bool CacheIRCompiler::emitSpecializedBindFunctionResult(
    ObjOperandId targetId, uint32_t argc, uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Specialized Function.prototype.bind path: like emitBindFunctionResult,
  // but passes a template object (loaded from the stub data at
  // |templateObjectOffset|) instead of a null maybeBound argument.
  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, callvm.output());
  AutoScratchRegister scratch2(allocator, masm);

  Register target = allocator.useRegister(masm, targetId);

  // Load the template BoundFunctionObject from the stub field.
  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch2);

  callvm.prepare();

  if (isBaseline()) {
    // Push the arguments in reverse order.
    for (uint32_t i = 0; i < argc; i++) {
      Address argAddress(FramePointer,
                         BaselineStubFrameLayout::Size() + i * sizeof(Value));
      masm.pushValue(argAddress);
    }
  } else {
    MOZ_ASSERT(argc == 0, "Call ICs not used in ion");
  }
  // Stack pointer now points at the first pushed argument; pass it as the
  // Value* args pointer.
  masm.moveStackPtrTo(scratch1.get());

  // VM-call arguments, pushed in reverse of the Fn parameter order.
  masm.Push(scratch2);
  masm.Push(Imm32(argc));
  masm.Push(scratch1);
  masm.Push(target);

  using Fn = BoundFunctionObject* (*)(JSContext*, Handle<JSObject*>, Value*,
                                      uint32_t, Handle<BoundFunctionObject*>);
  callvm.call<Fn, BoundFunctionObject::functionBindSpecializedBaseline>();
  return true;
}
   4693 
bool CacheIRCompiler::emitLinearizeForCharAccess(StringOperandId strId,
                                                 Int32OperandId indexId,
                                                 StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Ensure |str| supports direct char access at |index|, linearizing it via
  // an ABI call when necessary. The (possibly linearized) string is the
  // result operand.
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  Register result = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Fast path: the string already allows loading the char; keep it as-is.
  Label done;
  masm.movePtr(str, result);

  // We can omit the bounds check, because we only compare the index against the
  // string length. In the worst case we unnecessarily linearize the string
  // when the index is out-of-bounds.

  masm.branchIfCanLoadStringChar(str, index, scratch, &done);
  {
    // Slow path: call into C++ to linearize. Volatile registers must be
    // saved around the ABI call; |result| holds the return value so it is
    // excluded from the restore.
    LiveRegisterSet volatileRegs = liveVolatileRegs();
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(str);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
    masm.storeCallPointerResult(result);

    LiveRegisterSet ignore;
    ignore.add(result);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    // A null return means linearization failed; bail out of the stub.
    masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
  }

  masm.bind(&done);
  return true;
}
   4736 
bool CacheIRCompiler::emitLinearizeForCodePointAccess(
    StringOperandId strId, Int32OperandId indexId, StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Like emitLinearizeForCharAccess, but for code-point access, which may
  // read two consecutive units (surrogate pairs) and therefore uses a
  // different fast-path check.
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  Register result = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Fast path: the string already allows loading the code point.
  Label done;
  masm.movePtr(str, result);

  // We can omit the bounds check, because we only compare the index against the
  // string length. In the worst case we unnecessarily linearize the string
  // when the index is out-of-bounds.

  masm.branchIfCanLoadStringCodePoint(str, index, scratch1, scratch2, &done);
  {
    // Slow path: call into C++ to linearize (shares the char-access helper).
    // Volatile registers are saved around the ABI call; |result| holds the
    // return value so it is excluded from the restore.
    LiveRegisterSet volatileRegs = liveVolatileRegs();
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSString*);
    masm.setupUnalignedABICall(scratch1);
    masm.passABIArg(str);
    masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
    masm.storeCallPointerResult(result);

    LiveRegisterSet ignore;
    ignore.add(result);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);

    // A null return means linearization failed; bail out of the stub.
    masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
  }

  masm.bind(&done);
  return true;
}
   4779 
bool CacheIRCompiler::emitToRelativeStringIndex(Int32OperandId indexId,
                                                StringOperandId strId,
                                                Int32OperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Convert a possibly-negative relative index (String.prototype.at-style)
  // into an absolute index: result = index < 0 ? index + length : index.
  Register index = allocator.useRegister(masm, indexId);
  Register str = allocator.useRegister(masm, strId);
  Register result = allocator.defineRegister(masm, resultId);

  // If |index| is non-negative, it's an index relative to the start of the
  // string. Otherwise it's an index relative to the end of the string.
  // cmp32Load32 conditionally overwrites the 0 in |result| with the string
  // length only when index < 0, giving a branch-free select.
  masm.move32(Imm32(0), result);
  masm.cmp32Load32(Assembler::LessThan, index, Imm32(0),
                   Address(str, JSString::offsetOfLength()), result);
  masm.add32(index, result);
  return true;
}
   4796 
bool CacheIRCompiler::emitLoadStringLengthResult(StringOperandId strId) {
  // Load str->length() and box it as an Int32 Value in the output register.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  masm.loadStringLength(str, scratch);
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   4807 
bool CacheIRCompiler::emitLoadStringCharCodeResult(StringOperandId strId,
                                                   Int32OperandId indexId,
                                                   bool handleOOB) {
  // Emit String.prototype.charCodeAt: load the char code at |index| into the
  // output as an Int32 Value. With |handleOOB| set, an out-of-bounds index
  // yields NaN instead of bailing to the failure path.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  // Bounds check, load string char.
  Label done;
  if (!handleOOB) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    // Out-of-bounds index or a failed char load both bail out of the stub.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, failure->label());
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
                        failure->label());
  } else {
    // Return NaN for out-of-bounds access.
    masm.moveValue(JS::NaNValue(), output.valueReg());

    // The bounds check mustn't use a scratch register which aliases the output.
    MOZ_ASSERT(!output.valueReg().aliases(scratch3));

    // This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
    // guaranteed to see no nested ropes.
    Label loadFailed;
    // On OOB, jump straight to |done| with the NaN value already in place.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, &done);
    masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);

    // The load can only fail for ropes, which the preceding linearization
    // rules out, so the failure label is unreachable by construction.
    Label loadedChar;
    masm.jump(&loadedChar);
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringChar can't fail for linear strings");
    masm.bind(&loadedChar);
  }

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  masm.bind(&done);
  return true;
}
   4856 
bool CacheIRCompiler::emitLoadStringCodePointResult(StringOperandId strId,
                                                    Int32OperandId indexId,
                                                    bool handleOOB) {
  // Emit String.prototype.codePointAt: load the code point at |index| into the
  // output as an Int32 Value. With |handleOOB| set, an out-of-bounds index
  // yields undefined instead of bailing to the failure path. Mirrors
  // emitLoadStringCharCodeResult except for the OOB sentinel and the
  // code-point (surrogate-pair aware) load.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register str = allocator.useRegister(masm, strId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
  AutoScratchRegister scratch3(allocator, masm);

  // Bounds check, load string char.
  Label done;
  if (!handleOOB) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch1, failure->label());
    masm.loadStringCodePoint(str, index, scratch1, scratch2, scratch3,
                             failure->label());
  } else {
    // Return undefined for out-of-bounds access.
    masm.moveValue(JS::UndefinedValue(), output.valueReg());

    // The bounds check mustn't use a scratch register which aliases the output.
    MOZ_ASSERT(!output.valueReg().aliases(scratch3));

    // This CacheIR op is always preceded by |LinearizeForCodePointAccess|, so
    // we're guaranteed to see no nested ropes or split surrogates.
    Label loadFailed;
    // On OOB, jump straight to |done| with undefined already in the output.
    masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
                              scratch3, &done);
    masm.loadStringCodePoint(str, index, scratch1, scratch2, scratch3,
                             &loadFailed);

    // Unreachable by construction (see comment above); assert if ever taken.
    Label loadedChar;
    masm.jump(&loadedChar);
    masm.bind(&loadFailed);
    masm.assumeUnreachable("loadStringCodePoint can't fail for linear strings");
    masm.bind(&loadedChar);
  }

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  masm.bind(&done);
  return true;
}
   4906 
bool CacheIRCompiler::emitNewMapObjectResult(uint32_t templateObjectOffset) {
  // Allocate a new MapObject via a VM call with a null proto (the VM supplies
  // the default prototype). |templateObjectOffset| is not consumed by this
  // codegen path; the template object only exists for the stub's GC tracing.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  callvm.prepare();
  masm.Push(ImmPtr(nullptr));  // proto

  using Fn = MapObject* (*)(JSContext*, HandleObject);
  callvm.call<Fn, MapObject::create>();
  return true;
}
   4919 
bool CacheIRCompiler::emitNewSetObjectResult(uint32_t templateObjectOffset) {
  // Allocate a new SetObject via a VM call with a null proto (the VM supplies
  // the default prototype). |templateObjectOffset| is not consumed here; see
  // emitNewMapObjectResult.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  callvm.prepare();
  masm.Push(ImmPtr(nullptr));  // proto

  using Fn = SetObject* (*)(JSContext*, HandleObject);
  callvm.call<Fn, SetObject::create>();
  return true;
}
   4932 
bool CacheIRCompiler::emitNewMapObjectFromIterableResult(
    uint32_t templateObjectOffset, ValOperandId iterableId) {
  // Allocate a new MapObject populated from |iterable| via a VM call.
  // Arguments are pushed in reverse of the Fn signature order: the trailing
  // Handle<MapObject*> (allocatedFromJit) and the leading HandleObject (proto)
  // are both passed as null. |templateObjectOffset| is not consumed here.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  ValueOperand iterable = allocator.useValueRegister(masm, iterableId);

  callvm.prepare();
  masm.Push(ImmPtr(nullptr));  // allocatedFromJit
  masm.Push(iterable);
  masm.Push(ImmPtr(nullptr));  // proto

  using Fn = MapObject* (*)(JSContext*, Handle<JSObject*>, Handle<Value>,
                            Handle<MapObject*>);
  callvm.call<Fn, MapObject::createFromIterable>();
  return true;
}
   4950 
bool CacheIRCompiler::emitNewSetObjectFromIterableResult(
    uint32_t templateObjectOffset, ValOperandId iterableId) {
  // Allocate a new SetObject populated from |iterable| via a VM call.
  // Mirrors emitNewMapObjectFromIterableResult; arguments are pushed in
  // reverse of the Fn signature order with null proto/allocatedFromJit.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  ValueOperand iterable = allocator.useValueRegister(masm, iterableId);

  callvm.prepare();
  masm.Push(ImmPtr(nullptr));  // allocatedFromJit
  masm.Push(iterable);
  masm.Push(ImmPtr(nullptr));  // proto

  using Fn = SetObject* (*)(JSContext*, Handle<JSObject*>, Handle<Value>,
                            Handle<SetObject*>);
  callvm.call<Fn, SetObject::createFromIterable>();
  return true;
}
   4968 
bool CacheIRCompiler::emitNewStringObjectResult(uint32_t templateObjectOffset,
                                                StringOperandId strId) {
  // Allocate a new String wrapper object for |str| via a VM call.
  // |templateObjectOffset| is not consumed by this codegen path.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSObject* (*)(JSContext*, HandleString);
  callvm.call<Fn, NewStringObject>();
  return true;
}
   4984 
bool CacheIRCompiler::emitStringIncludesResult(StringOperandId strId,
                                               StringOperandId searchStrId) {
  // String.prototype.includes via VM call. Arguments are pushed in reverse of
  // the Fn signature order (searchStr, then str); the VM writes the boolean
  // result through the out-param and AutoCallVM stores it in the output.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringIncludes>();
  return true;
}
   5002 
bool CacheIRCompiler::emitStringIndexOfResult(StringOperandId strId,
                                              StringOperandId searchStrId) {
  // String.prototype.indexOf via VM call; int32 result via out-param.
  // Arguments are pushed in reverse of the Fn signature order.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  callvm.call<Fn, js::StringIndexOf>();
  return true;
}
   5020 
bool CacheIRCompiler::emitStringLastIndexOfResult(StringOperandId strId,
                                                  StringOperandId searchStrId) {
  // String.prototype.lastIndexOf via VM call; int32 result via out-param.
  // Arguments are pushed in reverse of the Fn signature order.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
  callvm.call<Fn, js::StringLastIndexOf>();
  return true;
}
   5038 
bool CacheIRCompiler::emitStringStartsWithResult(StringOperandId strId,
                                                 StringOperandId searchStrId) {
  // String.prototype.startsWith via VM call; boolean result via out-param.
  // Arguments are pushed in reverse of the Fn signature order.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringStartsWith>();
  return true;
}
   5056 
bool CacheIRCompiler::emitStringEndsWithResult(StringOperandId strId,
                                               StringOperandId searchStrId) {
  // String.prototype.endsWith via VM call; boolean result via out-param.
  // Arguments are pushed in reverse of the Fn signature order.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);
  Register searchStr = allocator.useRegister(masm, searchStrId);

  callvm.prepare();
  masm.Push(searchStr);
  masm.Push(str);

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  callvm.call<Fn, js::StringEndsWith>();
  return true;
}
   5074 
bool CacheIRCompiler::emitStringToLowerCaseResult(StringOperandId strId) {
  // String.prototype.toLowerCase via VM call; returns the new linear string.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSLinearString* (*)(JSContext*, JSString*);
  callvm.call<Fn, js::StringToLowerCase>();
  return true;
}
   5089 
bool CacheIRCompiler::emitStringToUpperCaseResult(StringOperandId strId) {
  // String.prototype.toUpperCase via VM call; returns the new linear string.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSLinearString* (*)(JSContext*, JSString*);
  callvm.call<Fn, js::StringToUpperCase>();
  return true;
}
   5104 
bool CacheIRCompiler::emitStringTrimResult(StringOperandId strId) {
  // String.prototype.trim via VM call.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrim>();
  return true;
}
   5119 
bool CacheIRCompiler::emitStringTrimStartResult(StringOperandId strId) {
  // String.prototype.trimStart via VM call.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrimStart>();
  return true;
}
   5134 
bool CacheIRCompiler::emitStringTrimEndResult(StringOperandId strId) {
  // String.prototype.trimEnd via VM call.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register str = allocator.useRegister(masm, strId);

  callvm.prepare();
  masm.Push(str);

  using Fn = JSString* (*)(JSContext*, HandleString);
  callvm.call<Fn, js::StringTrimEnd>();
  return true;
}
   5149 
bool CacheIRCompiler::emitLoadArgumentsObjectArgResult(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  // Load arguments[index] from an ArgumentsObject into the output Value.
  // The MacroAssembler helper bails to |failure| for any case it can't
  // handle inline (e.g. out-of-range or overridden elements).
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElement(obj, index, output.valueReg(), scratch,
                                  failure->label());
  return true;
}
   5167 
bool CacheIRCompiler::emitLoadArgumentsObjectArgHoleResult(
    ObjOperandId objId, Int32OperandId indexId) {
  // Like emitLoadArgumentsObjectArgResult, but uses the hole-tolerant helper
  // so missing elements produce a value instead of bailing.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElementHole(obj, index, output.valueReg(), scratch,
                                      failure->label());
  return true;
}
   5185 
bool CacheIRCompiler::emitLoadArgumentsObjectArgExistsResult(
    ObjOperandId objId, Int32OperandId indexId) {
  // Test whether arguments[index] exists on an ArgumentsObject; stores the
  // boolean answer (computed into scratch2) as the stub's result.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArgumentsObjectElementExists(obj, index, scratch2, scratch1,
                                        failure->label());
  EmitStoreResult(masm, scratch2, JSVAL_TYPE_BOOLEAN, output);
  return true;
}
   5205 
bool CacheIRCompiler::emitLoadDenseElementResult(ObjOperandId objId,
                                                 Int32OperandId indexId,
                                                 bool expectPackedElements) {
  // Load obj's dense element at |index| into the output. Bails on
  // out-of-bounds indices and on holes; with |expectPackedElements| the hole
  // check is replaced by a cheaper guard on the NON_PACKED elements flag.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Bounds check.
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());

  if (expectPackedElements) {
    Address flags(scratch1, ObjectElements::offsetOfFlags());
    masm.branchTest32(Assembler::NonZero, flags,
                      Imm32(ObjectElements::NON_PACKED), failure->label());
  }

  BaseObjectElementIndex element(scratch1, index);

  // If we did not check the packed flag, we must check for a hole value.
  if (!expectPackedElements) {
    masm.branchTestMagic(Assembler::Equal, element, failure->label());
  }

  masm.loadTypedOrValue(element, output);
  return true;
}
   5244 
bool CacheIRCompiler::emitGuardInt32IsNonNegative(Int32OperandId indexId) {
  // Guard: bail out of the stub if the int32 operand is negative.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register index = allocator.useRegister(masm, indexId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
  return true;
}
   5257 
bool CacheIRCompiler::emitGuardIntPtrIsNonNegative(IntPtrOperandId indexId) {
  // Guard: bail out of the stub if the pointer-sized signed operand is
  // negative (signed compare against 0).
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register index = allocator.useRegister(masm, indexId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::LessThan, index, ImmWord(0), failure->label());
  return true;
}
   5270 
bool CacheIRCompiler::emitGuardIndexIsNotDenseElement(ObjOperandId objId,
                                                      Int32OperandId indexId) {
  // Guard that obj has NO dense element at |index|: succeed when the index is
  // at or past the initialized length, or when the slot holds the hole magic;
  // bail when a real dense element is present.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);
  AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Ensure index >= initLength or the element is a hole.
  Label notDense;
  Address capacity(scratch, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, capacity, spectreScratch, &notDense);

  BaseValueIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, &notDense);

  // In-bounds and not a hole: a dense element exists, so the guard fails.
  masm.jump(failure->label());

  masm.bind(&notDense);
  return true;
}
   5300 
bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  // Guard that storing at |index| is permitted: always allowed when the array
  // length is writable; otherwise only indices below the current length (an
  // update of an existing element, never an append) may proceed.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch(allocator, masm);
  AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  Label success;

  // If length is writable, branch to &success.  All indices are writable.
  Address flags(scratch, ObjectElements::offsetOfFlags());
  masm.branchTest32(Assembler::Zero, flags,
                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
                    &success);

  // Otherwise, ensure index is in bounds.
  Address length(scratch, ObjectElements::offsetOfLength());
  masm.spectreBoundsCheck32(index, length, spectreScratch,
                            /* failure = */ failure->label());
  masm.bind(&success);
  return true;
}
   5332 
bool CacheIRCompiler::emitGuardTagNotEqual(ValueTagOperandId lhsId,
                                           ValueTagOperandId rhsId) {
  // Guard that two Value tags are not equal, with one caveat: two distinct
  // number tags (int32 vs. double) still denote comparable numbers, so that
  // combination must also fail the guard.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branch32(Assembler::Equal, lhs, rhs, failure->label());

  // If both lhs and rhs are numbers, can't use tag comparison to do inequality
  // comparison
  masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
  masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
  masm.jump(failure->label());

  masm.bind(&done);
  return true;
}
   5356 
bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto(
    ObjOperandId objId, uint32_t shapeWrapperOffset) {
  // Guard that an Xray wrapper's expando object has the shape recorded in the
  // stub (via a shape wrapper stub field) and that its proto slot is still
  // undefined (i.e. the default proto). Note: |scratch| is reused at each
  // step — proxy reserved slots, holder, expando, then unwrapped expando —
  // so the statement order here is load-bearing.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset shapeWrapper(shapeWrapperOffset, StubField::Type::JSObject);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address holderAddress(scratch,
                        sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
  Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
                                      GetXrayJitInfo()->holderExpandoSlot));

  // Bail if the holder or the expando slot doesn't hold an object.
  masm.fallibleUnboxObject(holderAddress, scratch, failure->label());
  masm.fallibleUnboxObject(expandoAddress, scratch, failure->label());

  // Unwrap the expando before checking its shape.
  masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
  masm.unboxObject(
      Address(scratch, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
      scratch);

  emitLoadStubField(shapeWrapper, scratch2);
  LoadShapeWrapperContents(masm, scratch2, scratch2, failure->label());
  masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2, scratch3,
                          scratch, failure->label());

  // The reserved slots on the expando should all be in fixed slots.
  Address protoAddress(scratch, NativeObject::getFixedSlotOffset(
                                    GetXrayJitInfo()->expandoProtoSlot));
  masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());

  return true;
}
   5400 
bool CacheIRCompiler::emitGuardXrayNoExpando(ObjOperandId objId) {
  // Guard that an Xray wrapper has no expando object: succeed if there is no
  // holder object at all, or if the holder's expando slot is not an object.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
  Address holderAddress(scratch,
                        sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
  Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
                                      GetXrayJitInfo()->holderExpandoSlot));

  // No holder object means no expando; we're done. Otherwise bail if the
  // holder's expando slot contains an object.
  Label done;
  masm.fallibleUnboxObject(holderAddress, scratch, &done);
  masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
  masm.bind(&done);

  return true;
}
   5425 
bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
    uint32_t builderAddrOffset) {
  // Guard that the realm's allocation-metadata-builder pointer (whose address
  // is baked into the stub as a raw-pointer stub field) is still null. Bails
  // if a builder has been installed since the stub was attached.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  StubFieldOffset builderField(builderAddrOffset, StubField::Type::RawPointer);
  emitLoadStubField(builderField, scratch);
  // Dereference the stored address and require the builder pointer to be null.
  masm.branchPtr(Assembler::NotEqual, Address(scratch, 0), ImmWord(0),
                 failure->label());

  return true;
}
   5443 
bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId) {
  // Guard that the function has a JIT entry point; bail otherwise.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register fun = allocator.useRegister(masm, funId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfFunctionHasNoJitEntry(fun, failure->label());
  return true;
}
   5456 
   5457 bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
   5458  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   5459  Register obj = allocator.useRegister(masm, funId);
   5460  AutoScratchRegister scratch(allocator, masm);
   5461 
   5462  FailurePath* failure;
   5463  if (!addFailurePath(&failure)) {
   5464    return false;
   5465  }
   5466 
   5467  masm.branchIfFunctionHasJitEntry(obj, failure->label());
   5468  return true;
   5469 }
   5470 
bool CacheIRCompiler::emitGuardFunctionIsNonBuiltinCtor(ObjOperandId funId) {
  // Guard that the function is a constructor and not a built-in one; bails
  // otherwise. |scratch| is consumed by the flag-testing helper.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNotFunctionIsNonBuiltinCtor(fun, scratch, failure->label());
  return true;
}
   5485 
   5486 bool CacheIRCompiler::emitGuardFunctionIsConstructor(ObjOperandId funId) {
   5487  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   5488  Register funcReg = allocator.useRegister(masm, funId);
   5489  AutoScratchRegister scratch(allocator, masm);
   5490 
   5491  FailurePath* failure;
   5492  if (!addFailurePath(&failure)) {
   5493    return false;
   5494  }
   5495 
   5496  // Ensure obj is a constructor
   5497  masm.branchTestFunctionFlags(funcReg, FunctionFlags::CONSTRUCTOR,
   5498                               Assembler::Zero, failure->label());
   5499  return true;
   5500 }
   5501 
bool CacheIRCompiler::emitGuardNotClassConstructor(ObjOperandId funId) {
  // Guard that the function is NOT a class constructor (class constructors
  // cannot be called without |new|); bail if it is one.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register fun = allocator.useRegister(masm, funId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
                          fun, scratch, failure->label());
  return true;
}
   5516 
bool CacheIRCompiler::emitGuardArrayIsPacked(ObjOperandId arrayId) {
  // Guard that the array's dense elements are packed (no holes); bail if the
  // elements header says otherwise.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchArrayIsNotPacked(array, scratch, scratch2, failure->label());
  return true;
}
   5531 
bool CacheIRCompiler::emitGuardArgumentsObjectFlags(ObjOperandId objId,
                                                    uint8_t flags) {
  // Guard that NONE of the given ArgumentsObject |flags| bits are set on
  // |obj|; bail if any of them are.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchTestArgumentsObjectFlags(obj, scratch, flags, Assembler::NonZero,
                                      failure->label());
  return true;
}
   5547 
bool CacheIRCompiler::emitGuardObjectHasSameRealm(ObjOperandId objId) {
  // Guard that |obj| belongs to the currently-running realm; bail otherwise.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.guardObjectHasSameRealm(obj, scratch, failure->label());
  return true;
}
   5562 
bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  // Load obj's dense element at |index|, producing undefined for holes and
  // for indices at or past the initialized length. Only a negative index
  // bails to the failure path.
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);

  // Guard on the initialized length.
  Label hole;
  Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, scratch2, &hole);

  // Load the value.
  Label done;
  masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
  // A magic (hole) value falls through into the undefined case below.
  masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);

  // Load undefined for the hole.
  masm.bind(&hole);
  masm.moveValue(UndefinedValue(), output.valueReg());

  masm.bind(&done);
  return true;
}
   5600 
// Result = (index < ta.length) as a boolean, i.e. whether the typed-array
// element exists. Resizable views need an extra scratch register because the
// length has to be (re)derived from the underlying buffer.
bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
    ObjOperandId objId, IntPtrOperandId indexId, ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Maybe<AutoScratchRegister> scratch2;
  if (viewKind == ArrayBufferViewKind::Resizable) {
    scratch2.emplace(allocator, masm);
  }

  Label outOfBounds, done;

  // Bounds check.
  if (viewKind == ArrayBufferViewKind::FixedLength ||
      viewKind == ArrayBufferViewKind::Immutable) {
    masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  } else {
    // Bounds check doesn't require synchronization. See IsValidIntegerIndex
    // abstract operation which reads the underlying buffer byte length using
    // "unordered" memory order.
    auto sync = Synchronization::None();

    masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch, *scratch2);
  }
  // Unsigned comparison: index >= length (including any negative index
  // reinterpreted as a huge unsigned value) means "doesn't exist".
  masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  masm.bind(&outOfBounds);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}
   5637 
// Result = true when obj has a dense, non-hole element at |index|.
// Out-of-bounds indices and holes bail to the next stub rather than
// returning false (a prototype lookup may still be required).
bool CacheIRCompiler::emitLoadDenseElementExistsResult(ObjOperandId objId,
                                                       Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unsigned compare sends negative indices to next IC.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());

  // Hole check: a magic value in the slot means the element is absent.
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, failure->label());

  EmitStoreBoolean(masm, true, output);
  return true;
}
   5665 
// Like emitLoadDenseElementExistsResult, but holes and out-of-bounds indices
// produce |false| inline instead of bailing. Only negative indices go to the
// next stub.
bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Make sure the index is nonnegative.
  masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());

  // Load obj->elements.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Guard on the initialized length; past-the-end counts as a hole.
  Label hole;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);

  // Load value and replace with true (magic value in the slot == hole).
  Label done;
  BaseObjectElementIndex element(scratch, index);
  masm.branchTestMagic(Assembler::Equal, element, &hole);
  EmitStoreBoolean(masm, true, output);
  masm.jump(&done);

  // Load false for the hole.
  masm.bind(&hole);
  EmitStoreBoolean(masm, false, output);

  masm.bind(&done);
  return true;
}
   5704 
// Inline Array.prototype.pop for packed arrays: the popped element (or
// undefined for an empty array) ends up in the output value register.
// Arrays that can't be handled inline bail to the failure path.
bool CacheIRCompiler::emitPackedArrayPopResult(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // All the heavy lifting (guards, length update, element load) is in the
  // MacroAssembler helper.
  masm.packedArrayPop(array, output.valueReg(), scratch1, scratch2,
                      failure->label());
  return true;
}
   5722 
// Inline Array.prototype.shift for packed arrays. Unlike pop, shifting may
// need to move the remaining elements, so the helper gets the volatile
// register set in case it has to make a call.
bool CacheIRCompiler::emitPackedArrayShiftResult(ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.packedArrayShift(array, output.valueReg(), scratch1, scratch2,
                        liveVolatileRegs(), failure->label());
  return true;
}
   5740 
// Result = (typeof-tag of |inputId| is Object) as a boolean. Pure type-tag
// test; never fails.
bool CacheIRCompiler::emitIsObjectResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  // Set scratch to 0/1 depending on the value's type tag, then box it.
  masm.testObjectSet(Assembler::Equal, val, scratch);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
   5754 
// Result = whether |objId| is a packed array (per
// MacroAssembler::setIsPackedArray), boxed as a boolean. Never fails.
bool CacheIRCompiler::emitIsPackedArrayResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  // The output value register's scratch part can hold the intermediate
  // boolean before tagging.
  Register outputScratch = output.valueReg().scratchReg();
  masm.setIsPackedArray(obj, outputScratch, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
  return true;
}
   5767 
// Result = IsCallable(value) as a boolean. Fast path handles primitives
// (never callable) and non-proxy objects inline; proxies fall back to an
// ABI call to ObjectIsCallable.
bool CacheIRCompiler::emitIsCallableResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

  ValueOperand val = allocator.useValueRegister(masm, inputId);

  Label isObject, done;
  masm.branchTestObject(Assembler::Equal, val, &isObject);
  // Primitives are never callable.
  masm.move32(Imm32(0), scratch2);
  masm.jump(&done);

  masm.bind(&isObject);
  masm.unboxObject(val, scratch1);

  // isCallable answers directly for ordinary objects and branches to
  // |isProxy| when it can't decide without calling out.
  Label isProxy;
  masm.isCallable(scratch1, scratch2, &isProxy);
  masm.jump(&done);

  masm.bind(&isProxy);
  {
    // Slow path: C++ call. Preserve volatile registers around the call,
    // except scratch2 which receives the result.
    LiveRegisterSet volatileRegs = liveVolatileRegs();
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch2);
    masm.passABIArg(scratch1);
    masm.callWithABI<Fn, ObjectIsCallable>();
    masm.storeCallBoolResult(scratch2);

    LiveRegisterSet ignore;
    ignore.add(scratch2);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
   5810 
// Result = IsConstructor(obj) as a boolean. Ordinary objects are decided
// inline by MacroAssembler::isConstructor; proxies take an ABI call to
// ObjectIsConstructor.
bool CacheIRCompiler::emitIsConstructorResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register obj = allocator.useRegister(masm, objId);

  Label isProxy, done;
  masm.isConstructor(obj, scratch, &isProxy);
  masm.jump(&done);

  masm.bind(&isProxy);
  {
    // Slow path: preserve volatile registers around the C++ call; scratch
    // carries the boolean result out.
    LiveRegisterSet volatileRegs = liveVolatileRegs();
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, ObjectIsConstructor>();
    masm.storeCallBoolResult(scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  }

  masm.bind(&done);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
   5843 
   5844 bool CacheIRCompiler::emitIsCrossRealmArrayConstructorResult(
   5845    ObjOperandId objId) {
   5846  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   5847 
   5848  AutoOutputRegister output(*this);
   5849  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   5850  Register obj = allocator.useRegister(masm, objId);
   5851 
   5852  masm.setIsCrossRealmArrayConstructor(obj, scratch);
   5853  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
   5854  return true;
   5855 }
   5856 
// Result = view.byteOffset as an int32. Bails to the next stub when the
// intptr-sized offset doesn't fit in a nonnegative int32 (the Double
// variant below handles that case).
bool CacheIRCompiler::emitArrayBufferViewByteOffsetInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   5875 
   5876 bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
   5877    ObjOperandId objId) {
   5878  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   5879 
   5880  AutoOutputRegister output(*this);
   5881  Register obj = allocator.useRegister(masm, objId);
   5882  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   5883 
   5884  ScratchDoubleScope fpscratch(masm);
   5885  masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
   5886  masm.convertIntPtrToDouble(scratch, fpscratch);
   5887  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
   5888  return true;
   5889 }
   5890 
// Result = ta.byteLength (= length * elementSize) as an int32. Bails when
// the length doesn't fit an int32 or the multiplication overflows.
bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
  masm.typedArrayElementSize(obj, scratch2);

  // scratch1 = length * elementSize; bail on int32 overflow.
  masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
                   failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}
   5914 
// Result = ta.byteLength boxed as a double. Full intptr multiply, so no
// failure path is needed (unlike the Int32 variant above).
bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  // scratch1 = length * elementSize, computed at pointer width.
  masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
  masm.typedArrayElementSize(obj, scratch2);
  masm.mulPtr(scratch2, scratch1);

  ScratchDoubleScope fpscratch(masm);
  masm.convertIntPtrToDouble(scratch1, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}
   5932 
// Result = ta.byteLength as an int32 for typed arrays backed by resizable
// buffers. The length is an atomic load; bails on int32 overflow of either
// the length or the byte-length multiply.
bool CacheIRCompiler::emitResizableTypedArrayByteLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Explicit |byteLength| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
  masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
  // scratch2 is reused: first as a scratch for the length load, then to
  // hold the element size.
  masm.typedArrayElementSize(obj, scratch2);

  masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
                   failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}
   5960 
// Result = ta.byteLength boxed as a double for typed arrays backed by
// resizable buffers. Pointer-width multiply, so no failure path.
bool CacheIRCompiler::emitResizableTypedArrayByteLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  // Explicit |byteLength| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
  masm.typedArrayElementSize(obj, scratch2);
  masm.mulPtr(scratch2, scratch1);

  ScratchDoubleScope fpscratch(masm);
  masm.convertIntPtrToDouble(scratch1, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}
   5982 
// Result = ta.length as an int32 for typed arrays backed by resizable
// buffers; bails when the length exceeds int32 range.
bool CacheIRCompiler::emitResizableTypedArrayLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Explicit |length| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
  masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}
   6006 
// Result = ta.length boxed as a double for typed arrays backed by resizable
// buffers; infallible, so no failure path.
bool CacheIRCompiler::emitResizableTypedArrayLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  // Explicit |length| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);

  ScratchDoubleScope fpscratch(masm);
  masm.convertIntPtrToDouble(scratch1, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}
   6026 
// Result = dataView.byteLength as an int32 for DataViews over resizable
// buffers; bails when the value exceeds int32 range.
bool CacheIRCompiler::emitResizableDataViewByteLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Explicit |byteLength| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch1, scratch2);
  masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}
   6050 
// Result = dataView.byteLength boxed as a double for DataViews over
// resizable buffers; infallible.
bool CacheIRCompiler::emitResizableDataViewByteLengthDoubleResult(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  // Explicit |byteLength| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch1, scratch2);

  ScratchDoubleScope fpscratch(masm);
  masm.convertIntPtrToDouble(scratch1, fpscratch);
  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  return true;
}
   6070 
// Result = buffer.byteLength as an int32 for growable SharedArrayBuffers;
// bails when the length exceeds int32 range.
bool CacheIRCompiler::emitGrowableSharedArrayBufferByteLengthInt32Result(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Explicit |byteLength| accesses are seq-consistent atomic loads.
  auto sync = Synchronization::Load();

  masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, scratch);
  masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   6093 
   6094 bool CacheIRCompiler::emitGrowableSharedArrayBufferByteLengthDoubleResult(
   6095    ObjOperandId objId) {
   6096  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6097 
   6098  AutoOutputRegister output(*this);
   6099  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   6100  Register obj = allocator.useRegister(masm, objId);
   6101 
   6102  // Explicit |byteLength| accesses are seq-consistent atomic loads.
   6103  auto sync = Synchronization::Load();
   6104 
   6105  masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, scratch);
   6106 
   6107  ScratchDoubleScope fpscratch(masm);
   6108  masm.convertIntPtrToDouble(scratch, fpscratch);
   6109  masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
   6110  return true;
   6111 }
   6112 
// Guard that the view's underlying ArrayBuffer has not been detached;
// jumps to the failure path if it has.
bool CacheIRCompiler::emitGuardHasAttachedArrayBuffer(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfHasDetachedArrayBuffer(obj, scratch, failure->label());
  return true;
}
   6127 
// Guard that a view over a resizable buffer is still in bounds (the buffer
// hasn't shrunk underneath it); jumps to the failure path otherwise.
bool CacheIRCompiler::emitGuardResizableArrayBufferViewInBounds(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfResizableArrayBufferViewOutOfBounds(obj, scratch,
                                                   failure->label());
  return true;
}
   6144 
// Guard that a view over a resizable buffer is either still in bounds OR its
// buffer has been detached. Only an attached-but-out-of-bounds view fails.
bool CacheIRCompiler::emitGuardResizableArrayBufferViewInBoundsOrDetached(
    ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // In-bounds views pass immediately; otherwise the view is acceptable only
  // if its buffer is detached.
  Label done;
  masm.branchIfResizableArrayBufferViewInBounds(obj, scratch, &done);
  masm.branchIfHasAttachedArrayBuffer(obj, scratch, failure->label());
  masm.bind(&done);
  return true;
}
   6163 
   6164 bool CacheIRCompiler::emitIsTypedArrayConstructorResult(ObjOperandId objId) {
   6165  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6166 
   6167  AutoOutputRegister output(*this);
   6168  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   6169  Register obj = allocator.useRegister(masm, objId);
   6170 
   6171  masm.setIsDefinitelyTypedArrayConstructor(obj, scratch);
   6172  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
   6173  return true;
   6174 }
   6175 
// Inline TypedArray.prototype.fill: calls one of the TypedArrayFill* C++
// helpers with (obj, fillValue, start, end) and returns the object itself.
// The fill value's operand kind depends on |elementType|: BigInt for
// [Big]Int64/Uint64, double for floating types, int32 otherwise.
bool CacheIRCompiler::emitTypedArrayFillResult(ObjOperandId objId,
                                               uint32_t fillValueId,
                                               IntPtrOperandId startId,
                                               IntPtrOperandId endId,
                                               Scalar::Type elementType) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of registers.
  Register obj = output.valueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register start = allocator.useRegister(masm, startId);
  Register end = allocator.useRegister(masm, endId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  MOZ_ASSERT(scratch.get() != obj, "output.scratchReg must not be typeReg");

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  // |fillValueId| is a raw operand id; reinterpret it according to the
  // element type. Floating-point fill values go straight into floatScratch0.
  Maybe<Register> fillValue;
  if (Scalar::isBigIntType(elementType)) {
    fillValue.emplace(
        allocator.useRegister(masm, BigIntOperandId(fillValueId)));
  } else if (Scalar::isFloatingType(elementType)) {
    allocator.ensureDoubleRegister(masm, NumberOperandId(fillValueId),
                                   floatScratch0);
  } else {
    fillValue.emplace(allocator.useRegister(masm, Int32OperandId(fillValueId)));
  }

  // Preserve live volatile registers around the ABI call; scratch is used
  // for the call setup itself, so exclude it.
  LiveRegisterSet save = liveVolatileRegs();
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  masm.passABIArg(obj);
  if (Scalar::isFloatingType(elementType)) {
    masm.passABIArg(floatScratch0, ABIType::Float64);
  } else {
    masm.passABIArg(*fillValue);
  }
  masm.passABIArg(start);
  masm.passABIArg(end);

  // Pick the C++ fill helper matching the fill value's representation.
  if (Scalar::isBigIntType(elementType)) {
    using Fn = void (*)(TypedArrayObject*, BigInt*, intptr_t, intptr_t);
    masm.callWithABI<Fn, js::TypedArrayFillBigInt>();
  } else if (Scalar::isFloatingType(elementType)) {
    using Fn = void (*)(TypedArrayObject*, double, intptr_t, intptr_t);
    masm.callWithABI<Fn, js::TypedArrayFillDouble>();
  } else {
    using Fn = void (*)(TypedArrayObject*, int32_t, intptr_t, intptr_t);
    masm.callWithABI<Fn, js::TypedArrayFillInt32>();
  }

  masm.PopRegsInMask(save);

  // fill() returns the typed array itself.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  return true;
}
   6241 
// Inline TypedArray.prototype.set(typedArray, offset). When the element
// representations allow a plain memcpy-style copy (|canUseBitwiseCopy|),
// emit an infallible ABI call; otherwise go through a VM call that can
// allocate temporary storage. Result is |undefined|.
bool CacheIRCompiler::emitTypedArraySetResult(ObjOperandId targetId,
                                              ObjOperandId sourceId,
                                              IntPtrOperandId offsetId,
                                              bool canUseBitwiseCopy) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Only one of |output| / |callvm| is live, depending on the copy strategy.
  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (canUseBitwiseCopy) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
  Register target = allocator.useRegister(masm, targetId);
  Register source = allocator.useRegister(masm, sourceId);
  Register offset = allocator.useRegister(masm, offsetId);

  const auto& eitherOutput = output ? *output : callvm->output();
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, eitherOutput);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, eitherOutput);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Ensure `offset <= target.length`.
  // scratch1 = target.length - offset; a negative result bails out.
  masm.loadArrayBufferViewLengthIntPtr(target, scratch1);
  masm.branchSubPtr(Assembler::Signed, offset, scratch1.get(),
                    failure->label());

  // Ensure `source.length <= (target.length - offset)`.
  masm.loadArrayBufferViewLengthIntPtr(source, scratch2);
  masm.branchPtr(Assembler::GreaterThan, scratch2, scratch1, failure->label());

  // Bit-wise copying is infallible because it doesn't need to allocate any
  // temporary memory, even if the underlying buffers are the same.
  if (canUseBitwiseCopy) {
    LiveRegisterSet save = liveVolatileRegs();
    save.takeUnchecked(scratch1);
    save.takeUnchecked(scratch2);
    masm.PushRegsInMask(save);

    using Fn = void (*)(TypedArrayObject*, TypedArrayObject*, intptr_t);
    masm.setupUnalignedABICall(scratch1);

    masm.passABIArg(target);
    masm.passABIArg(source);
    masm.passABIArg(offset);
    masm.callWithABI<Fn, js::TypedArraySetInfallible>();

    masm.PopRegsInMask(save);
  } else {
    // VM path: arguments are pushed in reverse order of the Fn signature.
    callvm->prepare();

    masm.Push(offset);
    masm.Push(source);
    masm.Push(target);

    using Fn =
        bool (*)(JSContext* cx, TypedArrayObject*, TypedArrayObject*, intptr_t);
    callvm->callNoResult<Fn, js::TypedArraySet>();
  }

  // set() returns undefined.
  masm.moveValue(UndefinedValue(), eitherOutput.valueReg());
  return true;
}
   6314 
// TypedArray.prototype.subarray via a VM call to js::TypedArraySubarray.
// Allocating the new view can GC, so there is no inline fast path here.
bool CacheIRCompiler::emitTypedArraySubarrayResult(
    uint32_t templateObjectOffset, ObjOperandId objId, IntPtrOperandId startId,
    IntPtrOperandId endId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register start = allocator.useRegister(masm, startId);
  Register end = allocator.useRegister(masm, endId);

  callvm.prepare();
  // Arguments are pushed in reverse order of the Fn signature.
  masm.Push(end);
  masm.Push(start);
  masm.Push(obj);

  using Fn = TypedArrayObject* (*)(JSContext*, Handle<TypedArrayObject*>,
                                   intptr_t, intptr_t);
  callvm.call<Fn, js::TypedArraySubarray>();
  return true;
}
   6336 
// Advance a Map or Set iterator one step via an ABI call to
// {Map,Set}IteratorObject::next, writing the entry into |resultArr| and
// returning the helper's "done" boolean as the IC result.
bool CacheIRCompiler::emitGetNextMapSetEntryForIteratorResult(
    ObjOperandId iterId, ObjOperandId resultArrId, bool isMap) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Register iter = allocator.useRegister(masm, iterId);
  Register resultArr = allocator.useRegister(masm, resultArrId);

  // Preserve volatile registers around the call; the output and scratch
  // registers are clobbered deliberately.
  LiveRegisterSet save = liveVolatileRegs();
  save.takeUnchecked(output.valueReg());
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(iter);
  masm.passABIArg(resultArr);
  if (isMap) {
    using Fn = bool (*)(MapIteratorObject* iter, ArrayObject* resultPairObj);
    masm.callWithABI<Fn, MapIteratorObject::next>();
  } else {
    using Fn = bool (*)(SetIteratorObject* iter, ArrayObject* resultObj);
    masm.callWithABI<Fn, SetIteratorObject::next>();
  }
  masm.storeCallBoolResult(scratch);

  masm.PopRegsInMask(save);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
   6368 
// Helper: mark a cached NativeIterator as active for |objBeingIterated| and
// link it onto the realm's active-enumerators list. |nativeIter| holds the
// NativeIterator*, |iterObject| the PropertyIteratorObject wrapping it.
void CacheIRCompiler::emitActivateIterator(Register objBeingIterated,
                                           Register iterObject,
                                           Register nativeIter,
                                           Register scratch, Register scratch2,
                                           uint32_t enumeratorsAddrOffset) {
  // 'objectBeingIterated_' must be nullptr, so we don't need a pre-barrier.
  Address iterObjAddr(nativeIter,
                      NativeIterator::offsetOfObjectBeingIterated());
#ifdef DEBUG
  Label ok;
  masm.branchPtr(Assembler::Equal, iterObjAddr, ImmPtr(nullptr), &ok);
  masm.assumeUnreachable("iterator with non-null object");
  masm.bind(&ok);
#endif

  // Mark iterator as active.
  Address iterFlagsAddr(nativeIter, NativeIterator::offsetOfFlags());
  masm.storePtr(objBeingIterated, iterObjAddr);
  masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);

  // Post-write barrier for stores to 'objectBeingIterated_'.
  emitPostBarrierSlot(
      iterObject,
      TypedOrValueRegister(MIRType::Object, AnyRegister(objBeingIterated)),
      scratch);

  // Chain onto the active iterator stack. The enumerators-list address is
  // baked into the stub data and loaded via a stub field.
  StubFieldOffset enumeratorsAddr(enumeratorsAddrOffset,
                                  StubField::Type::RawPointer);
  emitLoadStubField(enumeratorsAddr, scratch);
  masm.registerIterator(scratch, nativeIter, scratch2);
}
   6401 
// Produces a PropertyIteratorObject for |objId| (for-in style iteration).
// Fast path: reuse a cached iterator looked up via the object's shape and
// activate it inline; slow path: VM call to GetIterator.
bool CacheIRCompiler::emitObjectToIteratorResult(
    ObjOperandId objId, uint32_t enumeratorsAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister iterObj(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, callvm.output());
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, callvm.output());

  Label callVM, done;
  // Try to find a reusable cached iterator for |obj|'s shape; jumps to
  // |callVM| on a cache miss.
  masm.maybeLoadIteratorFromShape(obj, iterObj, scratch, scratch2, scratch3,
                                  &callVM, /* exclusive = */ true);

  // Unbox the NativeIterator* stored in the iterator object's private slot.
  masm.loadPrivate(
      Address(iterObj, PropertyIteratorObject::offsetOfIteratorSlot()),
      scratch);

  emitActivateIterator(obj, iterObj, scratch, scratch2, scratch3,
                       enumeratorsAddrOffset);
  masm.jump(&done);

  // Slow path: create/look up the iterator in C++.
  masm.bind(&callVM);
  callvm.prepare();
  masm.Push(obj);
  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleObject);
  callvm.call<Fn, GetIterator>();
  masm.storeCallPointerResult(iterObj);

  masm.bind(&done);
  EmitStoreResult(masm, iterObj, JSVAL_TYPE_OBJECT, callvm.output());
  return true;
}
   6437 
   6438 bool CacheIRCompiler::emitValueToIteratorResult(ValOperandId valId) {
   6439  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6440 
   6441  AutoCallVM callvm(masm, this, allocator);
   6442 
   6443  ValueOperand val = allocator.useValueRegister(masm, valId);
   6444 
   6445  callvm.prepare();
   6446 
   6447  masm.Push(val);
   6448 
   6449  using Fn = PropertyIteratorObject* (*)(JSContext*, HandleValue);
   6450  callvm.call<Fn, ValueToIterator>();
   6451  return true;
   6452 }
   6453 
   6454 bool CacheIRCompiler::emitNewArrayIteratorResult(
   6455    uint32_t templateObjectOffset) {
   6456  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6457 
   6458  AutoCallVM callvm(masm, this, allocator);
   6459 
   6460  callvm.prepare();
   6461 
   6462  using Fn = ArrayIteratorObject* (*)(JSContext*);
   6463  callvm.call<Fn, NewArrayIterator>();
   6464  return true;
   6465 }
   6466 
   6467 bool CacheIRCompiler::emitNewStringIteratorResult(
   6468    uint32_t templateObjectOffset) {
   6469  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6470 
   6471  AutoCallVM callvm(masm, this, allocator);
   6472 
   6473  callvm.prepare();
   6474 
   6475  using Fn = StringIteratorObject* (*)(JSContext*);
   6476  callvm.call<Fn, NewStringIterator>();
   6477  return true;
   6478 }
   6479 
   6480 bool CacheIRCompiler::emitNewRegExpStringIteratorResult(
   6481    uint32_t templateObjectOffset) {
   6482  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6483 
   6484  AutoCallVM callvm(masm, this, allocator);
   6485 
   6486  callvm.prepare();
   6487 
   6488  using Fn = RegExpStringIteratorObject* (*)(JSContext*);
   6489  callvm.call<Fn, NewRegExpStringIterator>();
   6490  return true;
   6491 }
   6492 
// Object.create(proto): clones a template PlainObject stored in a stub
// field by calling ObjectCreateWithTemplate in the VM.
bool CacheIRCompiler::emitObjectCreateResult(uint32_t templateObjectOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);

  // Load the template object pointer from the stub data.
  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(scratch);

  using Fn = PlainObject* (*)(JSContext*, Handle<PlainObject*>);
  callvm.call<Fn, ObjectCreateWithTemplate>();
  return true;
}
   6509 
// Object.keys(obj): always calls into the VM.
// NOTE(review): |resultShapeOffset| is unused on this path — presumably it
// is consumed by a different compilation tier; confirm against callers.
bool CacheIRCompiler::emitObjectKeysResult(ObjOperandId objId,
                                           uint32_t resultShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);

  // Our goal is only to record calls to Object.keys, to elide it when
  // partially used, not to provide an alternative implementation.
  {
    callvm.prepare();
    masm.Push(obj);

    using Fn = JSObject* (*)(JSContext*, HandleObject);
    callvm.call<Fn, jit::ObjectKeys>();
  }

  return true;
}
   6529 
// new Array(length) with a known template object and allocation site.
// Calls ArrayConstructorOneArg; arguments are pushed in reverse order of
// the Fn signature (allocation site, length, template object).
bool CacheIRCompiler::emitNewArrayFromLengthResult(
    uint32_t templateObjectOffset, Int32OperandId lengthId,
    uint32_t siteOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  Register length = allocator.useRegister(masm, lengthId);

  // Template ArrayObject* from the stub data.
  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  // gc::AllocSite* from the stub data.
  StubFieldOffset siteField(siteOffset, StubField::Type::AllocSite);
  emitLoadStubField(siteField, scratch2);

  callvm.prepare();
  masm.Push(scratch2);
  masm.Push(length);
  masm.Push(scratch);

  using Fn = ArrayObject* (*)(JSContext*, Handle<ArrayObject*>, int32_t,
                              gc::AllocSite*);
  callvm.call<Fn, ArrayConstructorOneArg>();
  return true;
}
   6556 
// new TypedArray(length) with a known template object. Arguments are
// pushed in reverse order of the Fn signature (length, then template).
bool CacheIRCompiler::emitNewTypedArrayFromLengthResult(
    uint32_t templateObjectOffset, Int32OperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register length = allocator.useRegister(masm, lengthId);

  // Template object from the stub data.
  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(length);
  masm.Push(scratch);

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, int32_t length);
  callvm.call<Fn, NewTypedArrayWithTemplateAndLength>();
  return true;
}
   6576 
// new TypedArray(buffer, byteOffset, length) with a known template object.
// Not emitted on 32-bit x86: the operand set (object + two boxed values)
// exceeds the available registers there.
bool CacheIRCompiler::emitNewTypedArrayFromArrayBufferResult(
    uint32_t templateObjectOffset, ObjOperandId bufferId,
    ValOperandId byteOffsetId, ValOperandId lengthId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

#ifdef JS_CODEGEN_X86
  MOZ_CRASH("Instruction not supported on 32-bit x86, not enough registers");
#endif

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register buffer = allocator.useRegister(masm, bufferId);
  ValueOperand byteOffset = allocator.useValueRegister(masm, byteOffsetId);
  ValueOperand length = allocator.useValueRegister(masm, lengthId);

  // Template object from the stub data.
  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  // Arguments pushed in reverse order of the Fn signature.
  callvm.prepare();
  masm.Push(length);
  masm.Push(byteOffset);
  masm.Push(buffer);
  masm.Push(scratch);

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject,
                                   HandleValue, HandleValue);
  callvm.call<Fn, NewTypedArrayWithTemplateAndBuffer>();
  return true;
}
   6606 
// new TypedArray(arrayLike) with a known template object. Arguments are
// pushed in reverse order of the Fn signature (array, then template).
bool CacheIRCompiler::emitNewTypedArrayFromArrayResult(
    uint32_t templateObjectOffset, ObjOperandId arrayId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  AutoScratchRegister scratch(allocator, masm);
  Register array = allocator.useRegister(masm, arrayId);

  // Template object from the stub data.
  StubFieldOffset objectField(templateObjectOffset, StubField::Type::JSObject);
  emitLoadStubField(objectField, scratch);

  callvm.prepare();
  masm.Push(array);
  masm.Push(scratch);

  using Fn = TypedArrayObject* (*)(JSContext*, HandleObject, HandleObject);
  callvm.call<Fn, NewTypedArrayWithTemplateAndArray>();
  return true;
}
   6626 
// Adds a property slot to |objId| (transitioning to the shape stored in
// the stub field at |newShapeOffset|) and invokes the class addProperty
// hook, all via one VM call. No stub result value is produced.
bool CacheIRCompiler::emitAddSlotAndCallAddPropHook(ObjOperandId objId,
                                                    ValOperandId rhsId,
                                                    uint32_t newShapeOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  AutoScratchRegister scratch(allocator, masm);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand rhs = allocator.useValueRegister(masm, rhsId);

  // New Shape* from the stub data.
  StubFieldOffset shapeField(newShapeOffset, StubField::Type::Shape);
  emitLoadStubField(shapeField, scratch);

  callvm.prepare();

  // Arguments pushed in reverse order of the Fn signature.
  masm.Push(scratch);
  masm.Push(rhs);
  masm.Push(obj);

  using Fn =
      bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, Handle<Shape*>);
  callvm.callNoResult<Fn, AddSlotAndCallAddPropHook>();
  return true;
}
   6652 
// Math.abs on an int32. Bails to the failure path for INT32_MIN, whose
// absolute value does not fit in an int32.
bool CacheIRCompiler::emitMathAbsInt32Result(Int32OperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Register input = allocator.useRegister(masm, inputId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.mov(input, scratch);
  // Don't negate already positive values.
  Label positive;
  masm.branchTest32(Assembler::NotSigned, scratch, scratch, &positive);
  // neg32 might overflow for INT_MIN.
  masm.branchNeg32(Assembler::Overflow, scratch, failure->label());
  masm.bind(&positive);

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   6677 
   6678 bool CacheIRCompiler::emitMathAbsNumberResult(NumberOperandId inputId) {
   6679  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6680 
   6681  AutoOutputRegister output(*this);
   6682  AutoAvailableFloatRegister scratch(*this, FloatReg0);
   6683 
   6684  allocator.ensureDoubleRegister(masm, inputId, scratch);
   6685 
   6686  masm.absDouble(scratch, scratch);
   6687  masm.boxDouble(scratch, output.valueReg(), scratch);
   6688  return true;
   6689 }
   6690 
   6691 bool CacheIRCompiler::emitMathClz32Result(Int32OperandId inputId) {
   6692  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6693 
   6694  AutoOutputRegister output(*this);
   6695  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   6696  Register input = allocator.useRegister(masm, inputId);
   6697 
   6698  masm.clz32(input, scratch, /* knownNotZero = */ false);
   6699  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   6700  return true;
   6701 }
   6702 
   6703 bool CacheIRCompiler::emitMathSignInt32Result(Int32OperandId inputId) {
   6704  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6705 
   6706  AutoOutputRegister output(*this);
   6707  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   6708  Register input = allocator.useRegister(masm, inputId);
   6709 
   6710  masm.signInt32(input, scratch);
   6711  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   6712  return true;
   6713 }
   6714 
   6715 bool CacheIRCompiler::emitMathSignNumberResult(NumberOperandId inputId) {
   6716  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6717 
   6718  AutoOutputRegister output(*this);
   6719  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
   6720  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
   6721 
   6722  allocator.ensureDoubleRegister(masm, inputId, floatScratch1);
   6723 
   6724  masm.signDouble(floatScratch1, floatScratch2);
   6725  masm.boxDouble(floatScratch2, output.valueReg(), floatScratch2);
   6726  return true;
   6727 }
   6728 
// Math.sign on a double input with an int32 result. Bails to the failure
// path when the sign cannot be represented as an int32 (per
// signDoubleToInt32, e.g. for NaN inputs — confirm in MacroAssembler).
bool CacheIRCompiler::emitMathSignNumberToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, floatScratch1);

  masm.signDoubleToInt32(floatScratch1, scratch, floatScratch2,
                         failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   6749 
   6750 bool CacheIRCompiler::emitMathImulResult(Int32OperandId lhsId,
   6751                                         Int32OperandId rhsId) {
   6752  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6753 
   6754  AutoOutputRegister output(*this);
   6755  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   6756  Register lhs = allocator.useRegister(masm, lhsId);
   6757  Register rhs = allocator.useRegister(masm, rhsId);
   6758 
   6759  masm.mov(lhs, scratch);
   6760  masm.mul32(rhs, scratch);
   6761  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   6762  return true;
   6763 }
   6764 
   6765 bool CacheIRCompiler::emitMathSqrtNumberResult(NumberOperandId inputId) {
   6766  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   6767 
   6768  AutoOutputRegister output(*this);
   6769  AutoAvailableFloatRegister scratch(*this, FloatReg0);
   6770 
   6771  allocator.ensureDoubleRegister(masm, inputId, scratch);
   6772 
   6773  masm.sqrtDouble(scratch, scratch);
   6774  masm.boxDouble(scratch, output.valueReg(), scratch);
   6775  return true;
   6776 }
   6777 
// Math.floor with a double result. Uses a native round-down instruction
// when the target has one; otherwise falls back to the shared
// out-of-line math-function call.
bool CacheIRCompiler::emitMathFloorNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::Down)) {
    masm.nearbyIntDouble(RoundingMode::Down, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Floor, scratch,
                                            output.valueReg());
}
   6795 
// Math.ceil with a double result. Uses a native round-up instruction when
// the target has one; otherwise falls back to the shared out-of-line
// math-function call.
bool CacheIRCompiler::emitMathCeilNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::Up)) {
    masm.nearbyIntDouble(RoundingMode::Up, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Ceil, scratch,
                                            output.valueReg());
}
   6813 
// Math.trunc with a double result. Uses a native round-toward-zero
// instruction when the target has one; otherwise falls back to the shared
// out-of-line math-function call.
bool CacheIRCompiler::emitMathTruncNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  if (Assembler::HasRoundInstruction(RoundingMode::TowardsZero)) {
    masm.nearbyIntDouble(RoundingMode::TowardsZero, scratch, scratch);
    masm.boxDouble(scratch, output.valueReg(), scratch);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Trunc, scratch,
                                            output.valueReg());
}
   6831 
// Math.round with a double result. The inline path is gated on round-up
// instruction support — presumably because masm.roundDouble is implemented
// in terms of it; confirm in the MacroAssembler. Otherwise falls back to
// the shared out-of-line math-function call.
bool CacheIRCompiler::emitMathRoundNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch0(*this, FloatReg0);
  AutoAvailableFloatRegister scratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, inputId, scratch0);

  if (Assembler::HasRoundInstruction(RoundingMode::Up)) {
    masm.roundDouble(scratch0, scratch1);
    masm.boxDouble(scratch1, output.valueReg(), scratch1);
    return true;
  }

  return emitMathFunctionNumberResultShared(UnaryMathFunction::Round, scratch0,
                                            output.valueReg());
}
   6850 
// Math.fround: round a double to the nearest float32 by converting
// double -> float32 -> double, then box the double result.
bool CacheIRCompiler::emitMathFRoundNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoAvailableFloatRegister scratch(*this, FloatReg0);
  // View the same FP register as a single-precision register.
  FloatRegister scratchFloat32 = scratch.get().asSingle();

  allocator.ensureDoubleRegister(masm, inputId, scratch);

  masm.convertDoubleToFloat32(scratch, scratchFloat32);
  masm.convertFloat32ToDouble(scratchFloat32, scratch);

  masm.boxDouble(scratch, output.valueReg(), scratch);
  return true;
}
   6866 
// Math.f16round: round a double to the nearest float16 and box the double
// result. Uses hardware float16 conversion when available; otherwise makes
// a C ABI call to js::RoundFloat16, saving the live volatile registers
// (minus the FP register that receives the result).
bool CacheIRCompiler::emitMathF16RoundNumberResult(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch(*this, FloatReg0);

  allocator.ensureDoubleRegister(masm, inputId, floatScratch);

  if (MacroAssembler::SupportsFloat64To16()) {
    // Hardware path: double -> float16 -> double round-trip.
    masm.convertDoubleToFloat16(floatScratch, floatScratch);
    masm.convertFloat16ToDouble(floatScratch, floatScratch);
  } else {
    // Software path via an ABI call.
    LiveRegisterSet save = liveVolatileRegs();
    save.takeUnchecked(floatScratch);
    masm.PushRegsInMask(save);

    using Fn = double (*)(double);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(floatScratch, ABIType::Float64);
    masm.callWithABI<Fn, js::RoundFloat16>(ABIType::Float64);
    masm.storeCallFloatResult(floatScratch);

    masm.PopRegsInMask(save);
  }

  masm.boxDouble(floatScratch, output.valueReg(), floatScratch);
  return true;
}
   6896 
// Math.hypot with two arguments: C ABI call to ecmaHypot. Volatile
// registers are saved around the call; the FP register receiving the
// result is excluded when restoring so the result survives.
bool CacheIRCompiler::emitMathHypot2NumberResult(NumberOperandId first,
                                                 NumberOperandId second) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);

  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);

  masm.callWithABI<Fn, ecmaHypot>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  // Restore everything except the register now holding the result.
  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}
   6927 
// Math.hypot with three arguments: C ABI call to hypot3. Same
// save/restore discipline as the two-argument version.
bool CacheIRCompiler::emitMathHypot3NumberResult(NumberOperandId first,
                                                 NumberOperandId second,
                                                 NumberOperandId third) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);
  allocator.ensureDoubleRegister(masm, third, floatScratch2);

  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y, double z);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.passABIArg(floatScratch2, ABIType::Float64);

  masm.callWithABI<Fn, hypot3>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  // Restore everything except the register now holding the result.
  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}
   6962 
// Math.hypot with four arguments: C ABI call to hypot4. Same save/restore
// discipline as the two-argument version.
bool CacheIRCompiler::emitMathHypot4NumberResult(NumberOperandId first,
                                                 NumberOperandId second,
                                                 NumberOperandId third,
                                                 NumberOperandId fourth) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);
  AutoAvailableFloatRegister floatScratch3(*this, FloatReg3);

  allocator.ensureDoubleRegister(masm, first, floatScratch0);
  allocator.ensureDoubleRegister(masm, second, floatScratch1);
  allocator.ensureDoubleRegister(masm, third, floatScratch2);
  allocator.ensureDoubleRegister(masm, fourth, floatScratch3);

  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y, double z, double w);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.passABIArg(floatScratch2, ABIType::Float64);
  masm.passABIArg(floatScratch3, ABIType::Float64);

  masm.callWithABI<Fn, hypot4>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  // Restore everything except the register now holding the result.
  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
  return true;
}
   7001 
// Math.atan2(y, x): C ABI call to js::ecmaAtan2. Volatile registers are
// saved around the call; the FP register receiving the result is excluded
// when restoring.
bool CacheIRCompiler::emitMathAtan2NumberResult(NumberOperandId yId,
                                                NumberOperandId xId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, yId, floatScratch0);
  allocator.ensureDoubleRegister(masm, xId, floatScratch1);

  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  using Fn = double (*)(double x, double y);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(floatScratch0, ABIType::Float64);
  masm.passABIArg(floatScratch1, ABIType::Float64);
  masm.callWithABI<Fn, js::ecmaAtan2>(ABIType::Float64);
  masm.storeCallFloatResult(floatScratch0);

  // Restore everything except the register now holding the result.
  LiveRegisterSet ignore;
  ignore.add(floatScratch0);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);

  return true;
}
   7032 
// Math.floor with an int32 result. Jumps to the failure path when the
// floored value cannot be represented as an int32 (per floorDoubleToInt32).
bool CacheIRCompiler::emitMathFloorToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.floorDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   7053 
// Math.ceil with an int32 result. Jumps to the failure path when the
// result cannot be represented as an int32 (per ceilDoubleToInt32).
bool CacheIRCompiler::emitMathCeilToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.ceilDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   7074 
// Math.trunc with an int32 result. Jumps to the failure path when the
// result cannot be represented as an int32 (per truncDoubleToInt32).
bool CacheIRCompiler::emitMathTruncToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  masm.truncDoubleToInt32(scratchFloat, scratch, failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   7095 
// Math.round with an int32 result. Jumps to the failure path when the
// result cannot be represented as an int32 (per roundDoubleToInt32, which
// needs an extra FP temporary).
bool CacheIRCompiler::emitMathRoundToInt32Result(NumberOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  AutoAvailableFloatRegister scratchFloat0(*this, FloatReg0);
  AutoAvailableFloatRegister scratchFloat1(*this, FloatReg1);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  allocator.ensureDoubleRegister(masm, inputId, scratchFloat0);

  masm.roundDoubleToInt32(scratchFloat0, scratch, scratchFloat1,
                          failure->label());

  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  return true;
}
   7118 
// Math.random(): runs the RNG inline. The RNG state pointer is stored as
// a raw-pointer stub field at |rngOffset|. Under differential testing the
// result is forced to 0.0 so runs are deterministic.
bool CacheIRCompiler::emitMathRandomResult(uint32_t rngOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister64 scratch2(allocator, masm);
  AutoScratchFloatRegister scratchFloat(this);

  // Load the RNG state pointer from the stub data.
  StubFieldOffset offset(rngOffset, StubField::Type::RawPointer);
  emitLoadStubField(offset, scratch1);

  masm.randomDouble(scratch1, scratchFloat, scratch2,
                    output.valueReg().toRegister64());

  if (js::SupportDifferentialTesting()) {
    masm.loadConstantDouble(0.0, scratchFloat);
  }

  masm.boxDouble(scratchFloat, output.valueReg(), scratchFloat);
  return true;
}
   7140 
   7141 bool CacheIRCompiler::emitInt32MinMax(bool isMax, Int32OperandId firstId,
   7142                                      Int32OperandId secondId,
   7143                                      Int32OperandId resultId) {
   7144  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   7145 
   7146  Register first = allocator.useRegister(masm, firstId);
   7147  Register second = allocator.useRegister(masm, secondId);
   7148  Register result = allocator.defineRegister(masm, resultId);
   7149 
   7150  if (isMax) {
   7151    masm.max32(first, second, result);
   7152  } else {
   7153    masm.min32(first, second, result);
   7154  }
   7155  return true;
   7156 }
   7157 
// Math.min/Math.max on two doubles; |isMax| selects the operation.
// handleNaN = true requests NaN-propagating semantics from the masm
// helpers. The result accumulates into scratch1 and is boxed from there.
bool CacheIRCompiler::emitNumberMinMax(bool isMax, NumberOperandId firstId,
                                       NumberOperandId secondId,
                                       NumberOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand output = allocator.defineValueRegister(masm, resultId);

  AutoAvailableFloatRegister scratch1(*this, FloatReg0);
  AutoAvailableFloatRegister scratch2(*this, FloatReg1);

  allocator.ensureDoubleRegister(masm, firstId, scratch1);
  allocator.ensureDoubleRegister(masm, secondId, scratch2);

  if (isMax) {
    masm.maxDouble(scratch2, scratch1, /* handleNaN = */ true);
  } else {
    masm.minDouble(scratch2, scratch1, /* handleNaN = */ true);
  }

  masm.boxDouble(scratch1, output, scratch1);
  return true;
}
   7180 
// Math.min/Math.max applied over a packed int32 array (spread-call fast
// path). Jumps to the failure path on inputs the inline loop cannot
// handle (per minMaxArrayInt32 — confirm its bailout conditions in the
// MacroAssembler).
bool CacheIRCompiler::emitInt32MinMaxArrayResult(ObjOperandId arrayId,
                                                 bool isMax) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);
  AutoScratchRegisterMaybeOutput result(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.minMaxArrayInt32(array, result, scratch, scratch2, scratch3, isMax,
                        failure->label());
  masm.tagValue(JSVAL_TYPE_INT32, result, output.valueReg());
  return true;
}
   7203 
// Compute the min/max over an array of numeric (double) elements, producing
// a boxed double Value. The scan is emitted by
// MacroAssembler::minMaxArrayNumber, which jumps to |failure| for inputs it
// cannot handle — see its definition for the bailout conditions.
bool CacheIRCompiler::emitNumberMinMaxArrayResult(ObjOperandId arrayId,
                                                  bool isMax) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register array = allocator.useRegister(masm, arrayId);

  // |result| accumulates the running extremum; |floatScratch| holds the
  // current element during the scan.
  AutoAvailableFloatRegister result(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch(*this, FloatReg1);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.minMaxArrayNumber(array, result, floatScratch, scratch1, scratch2, isMax,
                         failure->label());
  masm.boxDouble(result, output.valueReg(), result);
  return true;
}
   7227 
// Shared tail for unary Math functions: call the C++ implementation of |fun|
// through the platform ABI with the double argument in |inputScratch|, then
// box the double result into |output|.
//
// |inputScratch| carries the argument on entry and the result afterwards; it
// is deliberately excluded from the volatile-register save set so the call
// result is not clobbered when the saved registers are restored.
bool CacheIRCompiler::emitMathFunctionNumberResultShared(
    UnaryMathFunction fun, FloatRegister inputScratch, ValueOperand output) {
  UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(fun);

  LiveRegisterSet save = liveVolatileRegs();
  save.takeUnchecked(inputScratch);
  masm.PushRegsInMask(save);

  // output.scratchReg() is only used as a temp for the call setup.
  masm.setupUnalignedABICall(output.scratchReg());
  masm.passABIArg(inputScratch, ABIType::Float64);
  masm.callWithABI(DynamicFunction<UnaryMathFunctionType>(funPtr),
                   ABIType::Float64);
  masm.storeCallFloatResult(inputScratch);

  masm.PopRegsInMask(save);

  masm.boxDouble(inputScratch, output, inputScratch);
  return true;
}
   7247 
   7248 bool CacheIRCompiler::emitMathFunctionNumberResult(NumberOperandId inputId,
   7249                                                   UnaryMathFunction fun) {
   7250  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   7251 
   7252  AutoOutputRegister output(*this);
   7253  AutoAvailableFloatRegister scratch(*this, FloatReg0);
   7254 
   7255  allocator.ensureDoubleRegister(masm, inputId, scratch);
   7256 
   7257  return emitMathFunctionNumberResultShared(fun, scratch, output.valueReg());
   7258 }
   7259 
   7260 static void EmitStoreDenseElement(MacroAssembler& masm,
   7261                                  const ConstantOrRegister& value,
   7262                                  BaseObjectElementIndex target) {
   7263  if (value.constant()) {
   7264    Value v = value.value();
   7265    masm.storeValue(v, target);
   7266    return;
   7267  }
   7268 
   7269  TypedOrValueRegister reg = value.reg();
   7270  masm.storeTypedOrValue(reg, target);
   7271 }
   7272 
// Store |rhsId| into obj->elements[index] for an already-initialized dense
// element. Fails to the next stub when the index is >= initializedLength,
// when packed elements were expected but the elements have the NON_PACKED
// flag, or (in the non-packed case) when the target element is a hole.
bool CacheIRCompiler::emitStoreDenseElement(ObjOperandId objId,
                                            Int32OperandId indexId,
                                            ValOperandId rhsId,
                                            bool expectPackedElements) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Bounds check. Unfortunately we don't have more registers available on
  // x86, so use InvalidReg and emit slightly slower code on x86.
  Register spectreTemp = InvalidReg;
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());

  BaseObjectElementIndex element(scratch, index);
  if (expectPackedElements) {
    // Packed elements cannot contain holes, so a flag test replaces the
    // per-element hole check below.
    Address flags(scratch, ObjectElements::offsetOfFlags());
    masm.branchTest32(Assembler::NonZero, flags,
                      Imm32(ObjectElements::NON_PACKED), failure->label());
  } else {
    // Hole check.
    masm.branchTestMagic(Assembler::Equal, element, failure->label());
  }

  // Perform the store.
  EmitPreBarrier(masm, element, MIRType::Value);
  EmitStoreDenseElement(masm, val, element);

  emitPostBarrierElement(obj, val, scratch, index);
  return true;
}
   7316 
   7317 static void EmitAssertExtensibleElements(MacroAssembler& masm,
   7318                                         Register elementsReg) {
   7319 #ifdef DEBUG
   7320  // Preceding shape guards ensure the object elements are extensible.
   7321  Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
   7322  Label ok;
   7323  masm.branchTest32(Assembler::Zero, elementsFlags,
   7324                    Imm32(ObjectElements::Flags::NOT_EXTENSIBLE), &ok);
   7325  masm.assumeUnreachable("Unexpected non-extensible elements");
   7326  masm.bind(&ok);
   7327 #endif
   7328 }
   7329 
   7330 static void EmitAssertWritableArrayLengthElements(MacroAssembler& masm,
   7331                                                  Register elementsReg) {
   7332 #ifdef DEBUG
   7333  // Preceding shape guards ensure the array length is writable.
   7334  Address elementsFlags(elementsReg, ObjectElements::offsetOfFlags());
   7335  Label ok;
   7336  masm.branchTest32(Assembler::Zero, elementsFlags,
   7337                    Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
   7338                    &ok);
   7339  masm.assumeUnreachable("Unexpected non-writable array length elements");
   7340  masm.bind(&ok);
   7341 #endif
   7342 }
   7343 
// Store |rhsId| into obj->elements[index], optionally appending past the
// initialized length. With |handleAdd| set, an out-of-bounds index is handled
// by MacroAssembler::prepareOOBStoreElement (which jumps to |failure| for
// cases it cannot grow); without it, index >= initializedLength fails to the
// next stub. Preceding guards ensure the elements are extensible (and, when
// adding, that the array length is writable); debug builds assert this.
bool CacheIRCompiler::emitStoreDenseElementHole(ObjOperandId objId,
                                                Int32OperandId indexId,
                                                ValOperandId rhsId,
                                                bool handleAdd) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  EmitAssertExtensibleElements(masm, scratch);
  if (handleAdd) {
    EmitAssertWritableArrayLengthElements(masm, scratch);
  }

  BaseObjectElementIndex element(scratch, index);
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  Address elementsFlags(scratch, ObjectElements::offsetOfFlags());

  // We don't have enough registers on x86 so use InvalidReg. This will emit
  // slightly less efficient code on x86.
  Register spectreTemp = InvalidReg;

  Label storeSkipPreBarrier;
  if (handleAdd) {
    // Bounds check.
    Label inBounds, outOfBounds;
    masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
    masm.jump(&inBounds);

    // Out-of-bounds: grow the dense elements. NOTE(review): presumably this
    // leaves an up-to-date elements pointer in |scratch| — confirm in
    // MacroAssembler::prepareOOBStoreElement.
    masm.bind(&outOfBounds);
    masm.prepareOOBStoreElement(obj, index, scratch, spectreTemp,
                                failure->label(), liveVolatileRegs());

    // Skip EmitPreBarrier as the memory is uninitialized.
    masm.jump(&storeSkipPreBarrier);

    masm.bind(&inBounds);
  } else {
    // Fail if index >= initLength.
    masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
  }

  // In-bounds overwrite: pre-barrier the old value before clobbering it.
  EmitPreBarrier(masm, element, MIRType::Value);

  masm.bind(&storeSkipPreBarrier);
  EmitStoreDenseElement(masm, val, element);

  emitPostBarrierElement(obj, val, scratch, index);
  return true;
}
   7405 
// Inline fast path for pushing one value onto a dense array: append |rhsId|
// to obj->elements and produce the new length as an Int32 Value. Fails to
// the next stub when length != initializedLength, or when more capacity is
// needed and NativeObject::addDenseElementPure cannot provide it.
bool CacheIRCompiler::emitArrayPush(ObjOperandId objId, ValOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  ValueOperand val = allocator.useValueRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratchLength(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  // Preceding guards ensure extensible elements and a writable length;
  // debug builds verify this.
  EmitAssertExtensibleElements(masm, scratch);
  EmitAssertWritableArrayLengthElements(masm, scratch);

  Address elementsInitLength(scratch,
                             ObjectElements::offsetOfInitializedLength());
  Address elementsLength(scratch, ObjectElements::offsetOfLength());
  Address capacity(scratch, ObjectElements::offsetOfCapacity());

  // Fail if length != initLength.
  masm.load32(elementsInitLength, scratchLength);
  masm.branch32(Assembler::NotEqual, elementsLength, scratchLength,
                failure->label());

  // If scratchLength < capacity, we can add a dense element inline. If not we
  // need to allocate more elements.
  Label allocElement, addNewElement;
  masm.spectreBoundsCheck32(scratchLength, capacity, InvalidReg, &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  // Out of capacity: call into C++ to grow the elements. |scratch| is used
  // as the call temp, so exclude it from the volatile save set (it is
  // reloaded from the object afterwards anyway).
  LiveRegisterSet save = liveVolatileRegs();
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(scratch);

  masm.PopRegsInMask(save);
  masm.branchIfFalseBool(scratch, failure->label());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  masm.bind(&addNewElement);

  // Increment initLength and length.
  masm.add32(Imm32(1), elementsInitLength);
  masm.add32(Imm32(1), elementsLength);

  // Store the value. No pre-barrier needed: this slot was uninitialized.
  BaseObjectElementIndex element(scratch, scratchLength);
  masm.storeValue(val, element);
  emitPostBarrierElement(obj, val, scratch, scratchLength);

  // Return value is new length.
  masm.add32(Imm32(1), scratchLength);
  masm.tagValue(JSVAL_TYPE_INT32, scratchLength, output.valueReg());

  return true;
}
   7480 
   7481 bool CacheIRCompiler::emitPackedArraySliceResult(uint32_t templateObjectOffset,
   7482                                                 ObjOperandId arrayId,
   7483                                                 Int32OperandId beginId,
   7484                                                 Int32OperandId endId) {
   7485  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   7486 
   7487  AutoCallVM callvm(masm, this, allocator);
   7488 
   7489  Register array = allocator.useRegister(masm, arrayId);
   7490  Register begin = allocator.useRegister(masm, beginId);
   7491  Register end = allocator.useRegister(masm, endId);
   7492 
   7493  callvm.prepare();
   7494 
   7495  // Don't attempt to pre-allocate the object, instead always use the slow path.
   7496  ImmPtr result(nullptr);
   7497 
   7498  masm.Push(result);
   7499  masm.Push(end);
   7500  masm.Push(begin);
   7501  masm.Push(array);
   7502 
   7503  using Fn =
   7504      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
   7505  callvm.call<Fn, ArraySliceDense>();
   7506  return true;
   7507 }
   7508 
   7509 bool CacheIRCompiler::emitArgumentsSliceResult(uint32_t templateObjectOffset,
   7510                                               ObjOperandId argsId,
   7511                                               Int32OperandId beginId,
   7512                                               Int32OperandId endId) {
   7513  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   7514 
   7515  AutoCallVM callvm(masm, this, allocator);
   7516 
   7517  Register args = allocator.useRegister(masm, argsId);
   7518  Register begin = allocator.useRegister(masm, beginId);
   7519  Register end = allocator.useRegister(masm, endId);
   7520 
   7521  callvm.prepare();
   7522 
   7523  // Don't attempt to pre-allocate the object, instead always use the slow path.
   7524  ImmPtr result(nullptr);
   7525 
   7526  masm.Push(result);
   7527  masm.Push(end);
   7528  masm.Push(begin);
   7529  masm.Push(args);
   7530 
   7531  using Fn =
   7532      JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
   7533  callvm.call<Fn, ArgumentsSliceDense>();
   7534  return true;
   7535 }
   7536 
// Array.prototype.join with two inline fast paths:
//   - length 0: return the empty string atom.
//   - length 1 (and initializedLength 1) with a string element: return that
//     element directly, skipping the separator entirely.
// All other cases go through the jit::ArrayJoin VM call.
bool CacheIRCompiler::emitArrayJoinResult(ObjOperandId objId,
                                          StringOperandId sepId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register sep = allocator.useRegister(masm, sepId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());

  // Discard the stack to ensure it's balanced when we skip the vm-call.
  allocator.discardStack(masm);

  // Load obj->elements in scratch.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
  Address lengthAddr(scratch, ObjectElements::offsetOfLength());

  // If array length is 0, return empty string.
  Label finished;

  {
    Label arrayNotEmpty;
    masm.branch32(Assembler::NotEqual, lengthAddr, Imm32(0), &arrayNotEmpty);
    // |scratch| still holds the elements pointer here, but it is dead on
    // this path, so reuse it for the atom.
    masm.movePtr(ImmGCPtr(cx_->names().empty_), scratch);
    masm.tagValue(JSVAL_TYPE_STRING, scratch, callvm.outputValueReg());
    masm.jump(&finished);
    masm.bind(&arrayNotEmpty);
  }

  Label vmCall;

  // Otherwise, handle array length 1 case.
  masm.branch32(Assembler::NotEqual, lengthAddr, Imm32(1), &vmCall);

  // But only if initializedLength is also 1.
  Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
  masm.branch32(Assembler::NotEqual, initLength, Imm32(1), &vmCall);

  // And only if elem0 is a string.
  Address elementAddr(scratch, 0);
  masm.branchTestString(Assembler::NotEqual, elementAddr, &vmCall);

  // Store the value.
  masm.loadValue(elementAddr, callvm.outputValueReg());
  masm.jump(&finished);

  // Otherwise call into the VM.
  {
    masm.bind(&vmCall);

    callvm.prepare();

    masm.Push(sep);
    masm.Push(obj);

    using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
    callvm.call<Fn, jit::ArrayJoin>();
  }

  masm.bind(&finished);
  return true;
}
   7599 
// Store a value into a typed-array element. |rhsId| is a raw operand id
// whose concrete type (Int32, Number, or BigInt) is determined by
// |elementType|. With |handleOOB| set, an out-of-bounds index is a silent
// no-op; otherwise it fails to the next stub. |viewKind| selects between
// fixed-length/immutable views and resizable views, which need an extra
// scratch for the length load.
bool CacheIRCompiler::emitStoreTypedArrayElement(ObjOperandId objId,
                                                 Scalar::Type elementType,
                                                 IntPtrOperandId indexId,
                                                 uint32_t rhsId, bool handleOOB,
                                                 ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  // Unwrap |rhsId| according to the element type: integers land in
  // |valInt32|, BigInts in |valBigInt|, and floating-point values are
  // materialized directly into |floatScratch0|.
  Maybe<Register> valInt32;
  Maybe<Register> valBigInt;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Uint8Clamped:
      valInt32.emplace(allocator.useRegister(masm, Int32OperandId(rhsId)));
      break;

    case Scalar::Float16:
    case Scalar::Float32:
    case Scalar::Float64:
      allocator.ensureDoubleRegister(masm, NumberOperandId(rhsId),
                                     floatScratch0);
      break;

    case Scalar::BigInt64:
    case Scalar::BigUint64:
      valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(rhsId)));
      break;

    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
    case Scalar::Simd128:
      MOZ_CRASH("Unsupported TypedArray type");
  }

  // BigInt, Float16, and resizable-view stores need a real second scratch;
  // everything else only needs a Spectre bounds-check scratch.
  AutoScratchRegister scratch1(allocator, masm);
  Maybe<AutoScratchRegister> scratch2;
  Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
  if (Scalar::isBigIntType(elementType) || elementType == Scalar::Float16 ||
      viewKind == ArrayBufferViewKind::Resizable) {
    scratch2.emplace(allocator, masm);
  } else {
    spectreScratch.emplace(allocator, masm);
  }

  // Only the non-OOB-handling path can fail; OOB stores just skip to |done|.
  FailurePath* failure = nullptr;
  if (!handleOOB) {
    if (!addFailurePath(&failure)) {
      return false;
    }
  }

  // Bounds check.
  Label done;
  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch1, scratch2,
                            spectreScratch,
                            handleOOB ? &done : failure->label());

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);

  BaseIndex dest(scratch1, index, ScaleFromScalarType(elementType));

  if (Scalar::isBigIntType(elementType)) {
#ifdef JS_PUNBOX64
    Register64 temp(scratch2->get());
#else
    // We don't have more registers available on x86, so spill |obj|.
    masm.push(obj);
    Register64 temp(scratch2->get(), obj);
#endif

    masm.loadBigInt64(*valBigInt, temp);
    masm.storeToTypedBigIntArray(temp, dest);

#ifndef JS_PUNBOX64
    masm.pop(obj);
#endif
  } else if (Scalar::isFloatingType(elementType)) {
    Register temp = scratch2 ? scratch2->get() : InvalidReg;

    // Canonicalize floating point values for differential testing.
    if (js::SupportDifferentialTesting()) {
      masm.canonicalizeDouble(floatScratch0);
    }

    masm.storeToTypedFloatArray(elementType, floatScratch0, dest, temp,
                                liveVolatileRegs());
  } else {
    masm.storeToTypedIntArray(elementType, *valInt32, dest);
  }

  masm.bind(&done);
  return true;
}
   7702 
// Emit a Spectre-safe bounds check of |index| against |obj|'s typed-array
// length, jumping to |fail| when out of bounds.
//
// |maybeScratch| and |spectreScratch| may each be InvalidReg:
//  - when |spectreScratch| is absent, |maybeScratch| doubles as the Spectre
//    index-masking scratch;
//  - for resizable views, a second scratch is required for the length load;
//    when |maybeScratch| is absent, |index| is spilled and temporarily reused
//    for that purpose.
void CacheIRCompiler::emitTypedArrayBoundsCheck(ArrayBufferViewKind viewKind,
                                                Register obj, Register index,
                                                Register scratch,
                                                Register maybeScratch,
                                                Register spectreScratch,
                                                Label* fail) {
  // |index| must not alias any scratch register.
  MOZ_ASSERT(index != scratch);
  MOZ_ASSERT(index != maybeScratch);
  MOZ_ASSERT(index != spectreScratch);

  // Use |maybeScratch| when no explicit |spectreScratch| is present.
  if (spectreScratch == InvalidReg) {
    spectreScratch = maybeScratch;
  }

  if (viewKind == ArrayBufferViewKind::FixedLength ||
      viewKind == ArrayBufferViewKind::Immutable) {
    masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
    masm.spectreBoundsCheckPtr(index, scratch, spectreScratch, fail);
  } else {
    if (maybeScratch == InvalidReg) {
      // Spill |index| to use it as an additional scratch register.
      masm.push(index);

      maybeScratch = index;
    }

    // Bounds check doesn't require synchronization. See IsValidIntegerIndex
    // abstract operation which reads the underlying buffer byte length using
    // "unordered" memory order.
    auto sync = Synchronization::None();

    masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch, maybeScratch);

    if (maybeScratch == index) {
      // Restore |index|.
      masm.pop(index);
    }

    masm.spectreBoundsCheckPtr(index, scratch, spectreScratch, fail);
  }
}
   7746 
   7747 void CacheIRCompiler::emitTypedArrayBoundsCheck(
   7748    ArrayBufferViewKind viewKind, Register obj, Register index,
   7749    Register scratch, mozilla::Maybe<Register> maybeScratch,
   7750    mozilla::Maybe<Register> spectreScratch, Label* fail) {
   7751  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch,
   7752                            maybeScratch.valueOr(InvalidReg),
   7753                            spectreScratch.valueOr(InvalidReg), fail);
   7754 }
   7755 
// Load a typed-array element into the output Value. With |handleOOB| set,
// an out-of-bounds index yields |undefined|; otherwise it fails to the next
// stub. BigInt element types allocate the result BigInt up front so that the
// code after the allocation is infallible. |forceDoubleForUint32| controls
// whether uint32 values above INT32_MAX are boxed as doubles or cause a
// failure.
bool CacheIRCompiler::emitLoadTypedArrayElementResult(
    ObjOperandId objId, IntPtrOperandId indexId, Scalar::Type elementType,
    bool handleOOB, bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegister scratch1(allocator, masm);
#ifdef JS_PUNBOX64
  AutoScratchRegister scratch2(allocator, masm);
#else
  // There are too few registers available on x86, so we may need to reuse the
  // output's scratch register.
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check. |scratch2| serves as both the extra length scratch and the
  // Spectre scratch here.
  Label outOfBounds;
  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch1, scratch2, scratch2,
                            handleOOB ? &outOfBounds : failure->label());

  // Allocate BigInt if needed. The code after this should be infallible.
  Maybe<Register> bigInt;
  if (Scalar::isBigIntType(elementType)) {
    bigInt.emplace(output.valueReg().scratchReg());

    // The allocation call must not clobber our scratches or the output.
    LiveRegisterSet save = liveVolatileRegs();
    save.takeUnchecked(scratch1);
    save.takeUnchecked(scratch2);
    save.takeUnchecked(output);

    gc::Heap initialHeap = InitialBigIntHeap(cx_);
    EmitAllocateBigInt(masm, *bigInt, scratch1, save, initialHeap,
                       failure->label());
  }

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch1);

  // Load the value.
  BaseIndex source(scratch1, index, ScaleFromScalarType(elementType));

  if (Scalar::isBigIntType(elementType)) {
#ifdef JS_PUNBOX64
    Register64 temp(scratch2);
#else
    // We don't have more registers available on x86, so spill |obj| and
    // additionally use the output's type register.
    MOZ_ASSERT(output.valueReg().scratchReg() != output.valueReg().typeReg());
    masm.push(obj);
    Register64 temp(output.valueReg().typeReg(), obj);
#endif

    masm.loadFromTypedBigIntArray(elementType, source, output.valueReg(),
                                  *bigInt, temp);

#ifndef JS_PUNBOX64
    masm.pop(obj);
#endif
  } else {
    MacroAssembler::Uint32Mode uint32Mode =
        forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
                             : MacroAssembler::Uint32Mode::FailOnDouble;
    masm.loadFromTypedArray(elementType, source, output.valueReg(), uint32Mode,
                            scratch1, failure->label(), liveVolatileRegs());
  }

  if (handleOOB) {
    // Out-of-bounds reads produce |undefined|.
    Label done;
    masm.jump(&done);

    masm.bind(&outOfBounds);
    masm.moveValue(UndefinedValue(), output.valueReg());

    masm.bind(&done);
  }

  return true;
}
   7841 
// Emit a Spectre-safe bounds check for a DataView access of |byteSize| bytes
// starting at |offset|, jumping to |fail| when any accessed byte would be out
// of bounds (i.e. offset + byteSize > byteLength). For resizable views with
// no |maybeScratch|, |offset| is spilled and temporarily reused as the extra
// scratch for the byte-length load.
void CacheIRCompiler::emitDataViewBoundsCheck(ArrayBufferViewKind viewKind,
                                              size_t byteSize, Register obj,
                                              Register offset, Register scratch,
                                              Register maybeScratch,
                                              Label* fail) {
  // |offset| must not alias any scratch register.
  MOZ_ASSERT(offset != scratch);
  MOZ_ASSERT(offset != maybeScratch);

  if (viewKind == ArrayBufferViewKind::FixedLength ||
      viewKind == ArrayBufferViewKind::Immutable) {
    masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
  } else {
    if (maybeScratch == InvalidReg) {
      // Spill |offset| to use it as an additional scratch register.
      masm.push(offset);

      maybeScratch = offset;
    }

    // Bounds check doesn't require synchronization. See GetViewValue and
    // SetViewValue abstract operations which read the underlying buffer byte
    // length using "unordered" memory order.
    auto sync = Synchronization::None();

    masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch,
                                               maybeScratch);

    if (maybeScratch == offset) {
      // Restore |offset|.
      masm.pop(offset);
    }
  }

  // Ensure both offset < length and offset + (byteSize - 1) < length.
  if (byteSize == 1) {
    masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
  } else {
    // temp := length - (byteSize - 1)
    // if temp < 0: fail
    // if offset >= temp: fail
    masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), scratch, fail);
    masm.spectreBoundsCheckPtr(offset, scratch, InvalidReg, fail);
  }
}
   7887 
   7888 bool CacheIRCompiler::emitLoadDataViewValueResult(
   7889    ObjOperandId objId, IntPtrOperandId offsetId,
   7890    BooleanOperandId littleEndianId, Scalar::Type elementType,
   7891    bool forceDoubleForUint32, ArrayBufferViewKind viewKind) {
   7892  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   7893 
   7894  AutoOutputRegister output(*this);
   7895  Register obj = allocator.useRegister(masm, objId);
   7896  Register offset = allocator.useRegister(masm, offsetId);
   7897  Register littleEndian = allocator.useRegister(masm, littleEndianId);
   7898 
   7899  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
   7900 
   7901  Register64 outputReg64 = output.valueReg().toRegister64();
   7902  Register outputScratch = outputReg64.scratchReg();
   7903 
   7904  Register scratch2;
   7905 #ifndef JS_CODEGEN_X86
   7906  Maybe<AutoScratchRegister> maybeScratch2;
   7907  if (viewKind == ArrayBufferViewKind::Resizable ||
   7908      (elementType == Scalar::Float16 &&
   7909       !MacroAssembler::SupportsFloat32To16())) {
   7910    maybeScratch2.emplace(allocator, masm);
   7911    scratch2 = *maybeScratch2;
   7912  }
   7913 #else
   7914  // Not enough registers on x86, so use the other part of outputReg64.
   7915  scratch2 = outputReg64.secondScratchReg();
   7916 #endif
   7917 
   7918  FailurePath* failure;
   7919  if (!addFailurePath(&failure)) {
   7920    return false;
   7921  }
   7922 
   7923  const size_t byteSize = Scalar::byteSize(elementType);
   7924 
   7925  emitDataViewBoundsCheck(viewKind, byteSize, obj, offset, outputScratch,
   7926                          scratch2, failure->label());
   7927 
   7928  masm.loadPtr(Address(obj, DataViewObject::dataOffset()), outputScratch);
   7929 
   7930  // Load the value.
   7931  BaseIndex source(outputScratch, offset, TimesOne);
   7932  switch (elementType) {
   7933    case Scalar::Int8:
   7934      masm.load8SignExtend(source, outputScratch);
   7935      break;
   7936    case Scalar::Uint8:
   7937      masm.load8ZeroExtend(source, outputScratch);
   7938      break;
   7939    case Scalar::Int16:
   7940      masm.load16UnalignedSignExtend(source, outputScratch);
   7941      break;
   7942    case Scalar::Uint16:
   7943    case Scalar::Float16:
   7944      masm.load16UnalignedZeroExtend(source, outputScratch);
   7945      break;
   7946    case Scalar::Int32:
   7947    case Scalar::Uint32:
   7948    case Scalar::Float32:
   7949      masm.load32Unaligned(source, outputScratch);
   7950      break;
   7951    case Scalar::Float64:
   7952    case Scalar::BigInt64:
   7953    case Scalar::BigUint64:
   7954      masm.load64Unaligned(source, outputReg64);
   7955      break;
   7956    case Scalar::Uint8Clamped:
   7957    default:
   7958      MOZ_CRASH("Invalid typed array type");
   7959  }
   7960 
   7961  // Swap the bytes in the loaded value.
   7962  if (byteSize > 1) {
   7963    Label skip;
   7964    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
   7965                  littleEndian, Imm32(0), &skip);
   7966 
   7967    switch (elementType) {
   7968      case Scalar::Int16:
   7969        masm.byteSwap16SignExtend(outputScratch);
   7970        break;
   7971      case Scalar::Uint16:
   7972      case Scalar::Float16:
   7973        masm.byteSwap16ZeroExtend(outputScratch);
   7974        break;
   7975      case Scalar::Int32:
   7976      case Scalar::Uint32:
   7977      case Scalar::Float32:
   7978        masm.byteSwap32(outputScratch);
   7979        break;
   7980      case Scalar::Float64:
   7981      case Scalar::BigInt64:
   7982      case Scalar::BigUint64:
   7983        masm.byteSwap64(outputReg64);
   7984        break;
   7985      case Scalar::Int8:
   7986      case Scalar::Uint8:
   7987      case Scalar::Uint8Clamped:
   7988      default:
   7989        MOZ_CRASH("Invalid type");
   7990    }
   7991 
   7992    masm.bind(&skip);
   7993  }
   7994 
   7995  // Move the value into the output register.
   7996  switch (elementType) {
   7997    case Scalar::Int8:
   7998    case Scalar::Uint8:
   7999    case Scalar::Int16:
   8000    case Scalar::Uint16:
   8001    case Scalar::Int32:
   8002      masm.tagValue(JSVAL_TYPE_INT32, outputScratch, output.valueReg());
   8003      break;
   8004    case Scalar::Uint32: {
   8005      MacroAssembler::Uint32Mode uint32Mode =
   8006          forceDoubleForUint32 ? MacroAssembler::Uint32Mode::ForceDouble
   8007                               : MacroAssembler::Uint32Mode::FailOnDouble;
   8008      masm.boxUint32(outputScratch, output.valueReg(), uint32Mode,
   8009                     failure->label());
   8010      break;
   8011    }
   8012    case Scalar::Float16: {
   8013      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
   8014      masm.moveGPRToFloat16(outputScratch, scratchFloat32, scratch2,
   8015                            liveVolatileRegs());
   8016      masm.canonicalizeFloat(scratchFloat32);
   8017      masm.convertFloat32ToDouble(scratchFloat32, floatScratch0);
   8018      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
   8019      break;
   8020    }
   8021    case Scalar::Float32: {
   8022      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
   8023      masm.moveGPRToFloat32(outputScratch, scratchFloat32);
   8024      masm.canonicalizeFloat(scratchFloat32);
   8025      masm.convertFloat32ToDouble(scratchFloat32, floatScratch0);
   8026      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
   8027      break;
   8028    }
   8029    case Scalar::Float64:
   8030      masm.moveGPR64ToDouble(outputReg64, floatScratch0);
   8031      masm.canonicalizeDouble(floatScratch0);
   8032      masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
   8033      break;
   8034    case Scalar::BigInt64:
   8035    case Scalar::BigUint64: {
   8036      // We need two extra registers. Reuse the obj/littleEndian registers.
   8037      Register bigInt = obj;
   8038      Register bigIntScratch = littleEndian;
   8039      masm.push(bigInt);
   8040      masm.push(bigIntScratch);
   8041      Label fail, done;
   8042      LiveRegisterSet save = liveVolatileRegs();
   8043      save.takeUnchecked(bigInt);
   8044      save.takeUnchecked(bigIntScratch);
   8045      gc::Heap initialHeap = InitialBigIntHeap(cx_);
   8046      EmitAllocateBigInt(masm, bigInt, bigIntScratch, save, initialHeap, &fail);
   8047      masm.jump(&done);
   8048 
   8049      masm.bind(&fail);
   8050      masm.pop(bigIntScratch);
   8051      masm.pop(bigInt);
   8052      masm.jump(failure->label());
   8053 
   8054      masm.bind(&done);
   8055      masm.initializeBigInt64(elementType, bigInt, outputReg64);
   8056      masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
   8057      masm.pop(bigIntScratch);
   8058      masm.pop(bigInt);
   8059      break;
   8060    }
   8061    case Scalar::Uint8Clamped:
   8062    default:
   8063      MOZ_CRASH("Invalid typed array type");
   8064  }
   8065 
   8066  return true;
   8067 }
   8068 
bool CacheIRCompiler::emitStoreDataViewValueResult(
    ObjOperandId objId, IntPtrOperandId offsetId, uint32_t valueId,
    BooleanOperandId littleEndianId, Scalar::Type elementType,
    ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Stores a value into a DataView at |offset| with the element width and
  // endianness selected by |elementType| / |littleEndian|, and returns
  // |undefined| as the result Value. |valueId| is a raw operand id whose
  // concrete operand type (int32, number, or bigint) is determined by
  // |elementType| in the switch below.
  AutoOutputRegister output(*this);
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of the registers.
  Register obj = output.valueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register offset = allocator.useRegister(masm, offsetId);
  Register littleEndian = allocator.useRegister(masm, littleEndianId);

  // Allocate the value operand in a register class matching |elementType|:
  // a GPR for integer types, the float scratch for floating-point types, and
  // a GPR holding a BigInt* for the 64-bit integer types.
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  Maybe<Register> valInt32;
  Maybe<Register> valBigInt;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Uint8Clamped:
      valInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
      break;

    case Scalar::Float16:
    case Scalar::Float32:
    case Scalar::Float64:
      allocator.ensureDoubleRegister(masm, NumberOperandId(valueId),
                                     floatScratch0);
      break;

    case Scalar::BigInt64:
    case Scalar::BigUint64:
      valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
      break;

    case Scalar::MaxTypedArrayViewType:
    case Scalar::Int64:
    case Scalar::Simd128:
      MOZ_CRASH("Unsupported type");
  }

  Register scratch1 = output.valueReg().scratchReg();
  MOZ_ASSERT(scratch1 != obj, "scratchReg must not be typeReg");

  // On platforms with enough registers, |scratch2| is an extra scratch register
  // (pair) used for byte-swapping the value.
#ifndef JS_CODEGEN_X86
  mozilla::MaybeOneOf<AutoScratchRegister, AutoScratchRegister64> scratch2;
  switch (elementType) {
    case Scalar::Int8:
    case Scalar::Uint8:
      // Single-byte stores never need a swap scratch.
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float16:
    case Scalar::Float32:
      scratch2.construct<AutoScratchRegister>(allocator, masm);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      scratch2.construct<AutoScratchRegister64>(allocator, masm);
      break;
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }
#endif

  // Resizable views need an extra register for the bounds check. Reuse
  // |scratch2| when it was allocated above; otherwise allocate one here.
  Register boundsCheckScratch;
#ifndef JS_CODEGEN_X86
  Maybe<AutoScratchRegister> maybeBoundsCheckScratch;
  if (viewKind == ArrayBufferViewKind::Resizable) {
    if (scratch2.constructed<AutoScratchRegister>()) {
      boundsCheckScratch = scratch2.ref<AutoScratchRegister>().get();
    } else if (scratch2.constructed<AutoScratchRegister64>()) {
      boundsCheckScratch =
          scratch2.ref<AutoScratchRegister64>().get().scratchReg();
    } else {
      maybeBoundsCheckScratch.emplace(allocator, masm);
      boundsCheckScratch = *maybeBoundsCheckScratch;
    }
  }
#else
  // Not enough registers on x86.
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  const size_t byteSize = Scalar::byteSize(elementType);

  // Bail out to the failure path if |offset..offset+byteSize| is not within
  // the view's bounds.
  emitDataViewBoundsCheck(viewKind, byteSize, obj, offset, scratch1,
                          boundsCheckScratch, failure->label());

  masm.loadPtr(Address(obj, DataViewObject::dataOffset()), scratch1);
  BaseIndex dest(scratch1, offset, TimesOne);

  if (byteSize == 1) {
    // Byte swapping has no effect, so just do the byte store.
    masm.store8(*valInt32, dest);
    masm.moveValue(UndefinedValue(), output.valueReg());
    return true;
  }

  // On 32-bit x86, |obj| is already a scratch register so use that. If we need
  // a Register64 we also use the littleEndian register and use the stack
  // location for the check below.
  bool pushedLittleEndian = false;
#ifdef JS_CODEGEN_X86
  if (byteSize == 8) {
    masm.push(littleEndian);
    pushedLittleEndian = true;
  }
  auto valScratch32 = [&]() -> Register { return obj; };
  auto valScratch64 = [&]() -> Register64 {
    return Register64(obj, littleEndian);
  };
#else
  auto valScratch32 = [&]() -> Register {
    return scratch2.ref<AutoScratchRegister>();
  };
  auto valScratch64 = [&]() -> Register64 {
    return scratch2.ref<AutoScratchRegister64>();
  };
#endif

  // Canonicalize floating point values for differential testing.
  if (Scalar::isFloatingType(elementType) && js::SupportDifferentialTesting()) {
    masm.canonicalizeDouble(floatScratch0);
  }

  // Load the value into a gpr register.
  switch (elementType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      masm.move32(*valInt32, valScratch32());
      break;
    case Scalar::Float16: {
      // Narrow the double to a float16 bit pattern before moving it to a GPR.
      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
      masm.convertDoubleToFloat16(floatScratch0, scratchFloat32, valScratch32(),
                                  liveVolatileRegs());
      masm.moveFloat16ToGPR(scratchFloat32, valScratch32(), liveVolatileRegs());
      break;
    }
    case Scalar::Float32: {
      FloatRegister scratchFloat32 = floatScratch0.get().asSingle();
      masm.convertDoubleToFloat32(floatScratch0, scratchFloat32);
      masm.moveFloat32ToGPR(scratchFloat32, valScratch32());
      break;
    }
    case Scalar::Float64: {
      masm.moveDoubleToGPR64(floatScratch0, valScratch64());
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.loadBigInt64(*valBigInt, valScratch64());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }

  // Swap the bytes in the loaded value when the requested endianness differs
  // from the native one. If |littleEndian| was pushed above, test the stack
  // copy instead of the (now clobbered) register.
  Label skip;
  if (pushedLittleEndian) {
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  Address(masm.getStackPointer(), 0), Imm32(0), &skip);
  } else {
    masm.branch32(MOZ_LITTLE_ENDIAN() ? Assembler::NotEqual : Assembler::Equal,
                  littleEndian, Imm32(0), &skip);
  }
  switch (elementType) {
    case Scalar::Int16:
      masm.byteSwap16SignExtend(valScratch32());
      break;
    case Scalar::Uint16:
    case Scalar::Float16:
      masm.byteSwap16ZeroExtend(valScratch32());
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.byteSwap32(valScratch32());
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.byteSwap64(valScratch64());
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid type");
  }
  masm.bind(&skip);

  // Store the value.
  switch (elementType) {
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Float16:
      masm.store16Unaligned(valScratch32(), dest);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
    case Scalar::Float32:
      masm.store32Unaligned(valScratch32(), dest);
      break;
    case Scalar::Float64:
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      masm.store64Unaligned(valScratch64(), dest);
      break;
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    default:
      MOZ_CRASH("Invalid typed array type");
  }

#ifdef JS_CODEGEN_X86
  // Restore registers.
  if (pushedLittleEndian) {
    masm.pop(littleEndian);
  }
#endif

  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
   8318 
   8319 bool CacheIRCompiler::emitStoreFixedSlotUndefinedResult(ObjOperandId objId,
   8320                                                        uint32_t offsetOffset,
   8321                                                        ValOperandId rhsId) {
   8322  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8323 
   8324  AutoOutputRegister output(*this);
   8325  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   8326  Register obj = allocator.useRegister(masm, objId);
   8327  ValueOperand val = allocator.useValueRegister(masm, rhsId);
   8328 
   8329  StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
   8330  emitLoadStubField(offset, scratch);
   8331 
   8332  BaseIndex slot(obj, scratch, TimesOne);
   8333  EmitPreBarrier(masm, slot, MIRType::Value);
   8334  masm.storeValue(val, slot);
   8335  emitPostBarrierSlot(obj, val, scratch);
   8336 
   8337  masm.moveValue(UndefinedValue(), output.valueReg());
   8338  return true;
   8339 }
   8340 
   8341 bool CacheIRCompiler::emitLoadObjectResult(ObjOperandId objId) {
   8342  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8343  AutoOutputRegister output(*this);
   8344  Register obj = allocator.useRegister(masm, objId);
   8345 
   8346  EmitStoreResult(masm, obj, JSVAL_TYPE_OBJECT, output);
   8347 
   8348  return true;
   8349 }
   8350 
   8351 bool CacheIRCompiler::emitLoadStringResult(StringOperandId strId) {
   8352  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8353  AutoOutputRegister output(*this);
   8354  Register str = allocator.useRegister(masm, strId);
   8355 
   8356  masm.tagValue(JSVAL_TYPE_STRING, str, output.valueReg());
   8357 
   8358  return true;
   8359 }
   8360 
   8361 bool CacheIRCompiler::emitLoadSymbolResult(SymbolOperandId symId) {
   8362  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8363  AutoOutputRegister output(*this);
   8364  Register sym = allocator.useRegister(masm, symId);
   8365 
   8366  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
   8367 
   8368  return true;
   8369 }
   8370 
   8371 bool CacheIRCompiler::emitLoadInt32Result(Int32OperandId valId) {
   8372  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8373  AutoOutputRegister output(*this);
   8374  Register val = allocator.useRegister(masm, valId);
   8375 
   8376  masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
   8377 
   8378  return true;
   8379 }
   8380 
   8381 bool CacheIRCompiler::emitLoadBigIntResult(BigIntOperandId valId) {
   8382  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8383  AutoOutputRegister output(*this);
   8384  Register val = allocator.useRegister(masm, valId);
   8385 
   8386  masm.tagValue(JSVAL_TYPE_BIGINT, val, output.valueReg());
   8387 
   8388  return true;
   8389 }
   8390 
bool CacheIRCompiler::emitLoadDoubleResult(NumberOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Produces the number operand as a boxed double Value, widening an int32
  // payload to a double if necessary.
  AutoOutputRegister output(*this);
  ValueOperand val = allocator.useValueRegister(masm, valId);

#ifdef DEBUG
  // A NumberOperandId may hold either a double or an int32; assert nothing
  // else reaches this point.
  Label ok;
  masm.branchTestDouble(Assembler::Equal, val, &ok);
  masm.branchTestInt32(Assembler::Equal, val, &ok);
  masm.assumeUnreachable("input must be double or int32");
  masm.bind(&ok);
#endif

  masm.moveValue(val, output.valueReg());
  masm.convertInt32ValueToDouble(output.valueReg());

  return true;
}
   8409 
bool CacheIRCompiler::emitLoadTypeOfObjectResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Computes |typeof obj| for an object operand and produces the result as a
  // string Value ("function", "undefined", or "object").
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // typeOfObject classifies the object inline and jumps to |slowCheck| for
  // objects it cannot classify without a call.
  Label slowCheck, isObject, isCallable, isUndefined, done;
  masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
                    &isUndefined);

  masm.bind(&isCallable);
  masm.moveValue(StringValue(cx_->names().function), output.valueReg());
  masm.jump(&done);

  masm.bind(&isUndefined);
  masm.moveValue(StringValue(cx_->names().undefined), output.valueReg());
  masm.jump(&done);

  masm.bind(&isObject);
  masm.moveValue(StringValue(cx_->names().object), output.valueReg());
  masm.jump(&done);

  // Slow path: call TypeOfNameObject through the ABI to get the name string.
  {
    masm.bind(&slowCheck);
    LiveRegisterSet save = liveVolatileRegs();
    masm.PushRegsInMask(save);

    using Fn = JSString* (*)(JSObject * obj, JSRuntime * rt);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.movePtr(ImmPtr(cx_->runtime()), scratch);
    masm.passABIArg(scratch);
    masm.callWithABI<Fn, TypeOfNameObject>();
    masm.storeCallPointerResult(scratch);

    // Restore the volatile registers, but keep the call result in |scratch|.
    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
   8455 
   8456 bool CacheIRCompiler::emitLoadTypeOfEqObjectResult(ObjOperandId objId,
   8457                                                   TypeofEqOperand operand) {
   8458  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8459  AutoOutputRegister output(*this);
   8460  Register obj = allocator.useRegister(masm, objId);
   8461  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   8462  JSType type = operand.type();
   8463  JSOp compareOp = operand.compareOp();
   8464  bool result;
   8465 
   8466  Label slowCheck, isObject, isCallable, isUndefined, done;
   8467  masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
   8468                    &isUndefined);
   8469 
   8470  masm.bind(&isCallable);
   8471  result = type == JSTYPE_FUNCTION;
   8472  if (compareOp == JSOp::Ne) {
   8473    result = !result;
   8474  }
   8475  masm.moveValue(BooleanValue(result), output.valueReg());
   8476  masm.jump(&done);
   8477 
   8478  masm.bind(&isUndefined);
   8479  result = type == JSTYPE_UNDEFINED;
   8480  if (compareOp == JSOp::Ne) {
   8481    result = !result;
   8482  }
   8483  masm.moveValue(BooleanValue(result), output.valueReg());
   8484  masm.jump(&done);
   8485 
   8486  masm.bind(&isObject);
   8487  result = type == JSTYPE_OBJECT;
   8488  if (compareOp == JSOp::Ne) {
   8489    result = !result;
   8490  }
   8491  masm.moveValue(BooleanValue(result), output.valueReg());
   8492  masm.jump(&done);
   8493 
   8494  {
   8495    masm.bind(&slowCheck);
   8496    LiveRegisterSet save = liveVolatileRegs();
   8497    save.takeUnchecked(output.valueReg());
   8498    save.takeUnchecked(scratch);
   8499    masm.PushRegsInMask(save);
   8500 
   8501    using Fn = bool (*)(JSObject* obj, TypeofEqOperand operand);
   8502    masm.setupUnalignedABICall(scratch);
   8503    masm.passABIArg(obj);
   8504    masm.move32(Imm32(TypeofEqOperand(type, compareOp).rawValue()), scratch);
   8505    masm.passABIArg(scratch);
   8506    masm.callWithABI<Fn, TypeOfEqObject>();
   8507    masm.storeCallBoolResult(scratch);
   8508 
   8509    masm.PopRegsInMask(save);
   8510 
   8511    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
   8512  }
   8513 
   8514  masm.bind(&done);
   8515  return true;
   8516 }
   8517 
   8518 bool CacheIRCompiler::emitLoadInt32TruthyResult(ValOperandId inputId) {
   8519  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8520  AutoOutputRegister output(*this);
   8521  ValueOperand val = allocator.useValueRegister(masm, inputId);
   8522 
   8523  Label ifFalse, done;
   8524  masm.branchTestInt32Truthy(false, val, &ifFalse);
   8525  masm.moveValue(BooleanValue(true), output.valueReg());
   8526  masm.jump(&done);
   8527 
   8528  masm.bind(&ifFalse);
   8529  masm.moveValue(BooleanValue(false), output.valueReg());
   8530 
   8531  masm.bind(&done);
   8532  return true;
   8533 }
   8534 
   8535 bool CacheIRCompiler::emitLoadStringTruthyResult(StringOperandId strId) {
   8536  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8537  AutoOutputRegister output(*this);
   8538  Register str = allocator.useRegister(masm, strId);
   8539 
   8540  Label ifFalse, done;
   8541  masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
   8542                Imm32(0), &ifFalse);
   8543  masm.moveValue(BooleanValue(true), output.valueReg());
   8544  masm.jump(&done);
   8545 
   8546  masm.bind(&ifFalse);
   8547  masm.moveValue(BooleanValue(false), output.valueReg());
   8548 
   8549  masm.bind(&done);
   8550  return true;
   8551 }
   8552 
   8553 bool CacheIRCompiler::emitLoadDoubleTruthyResult(NumberOperandId inputId) {
   8554  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8555  AutoOutputRegister output(*this);
   8556 
   8557  AutoScratchFloatRegister floatReg(this);
   8558 
   8559  allocator.ensureDoubleRegister(masm, inputId, floatReg);
   8560 
   8561  Label ifFalse, done;
   8562 
   8563  masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
   8564  masm.moveValue(BooleanValue(true), output.valueReg());
   8565  masm.jump(&done);
   8566 
   8567  masm.bind(&ifFalse);
   8568  masm.moveValue(BooleanValue(false), output.valueReg());
   8569 
   8570  masm.bind(&done);
   8571  return true;
   8572 }
   8573 
bool CacheIRCompiler::emitLoadObjectTruthyResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Converts an object to a boolean: objects are truthy unless they emulate
  // undefined. Most objects are handled inline; the rest use an ABI call.
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  Label emulatesUndefined, slowPath, done;
  masm.branchIfObjectEmulatesUndefined(obj, scratch, &slowPath,
                                       &emulatesUndefined);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&emulatesUndefined);
  masm.moveValue(BooleanValue(false), output.valueReg());
  masm.jump(&done);

  masm.bind(&slowPath);
  {
    // Call js::EmulatesUndefined; truthiness is the negation of its result,
    // hence the xor with 1 below.
    LiveRegisterSet volatileRegs = liveVolatileRegs();
    volatileRegs.takeUnchecked(scratch);
    volatileRegs.takeUnchecked(output);
    masm.PushRegsInMask(volatileRegs);

    using Fn = bool (*)(JSObject* obj);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.callWithABI<Fn, js::EmulatesUndefined>();
    masm.storeCallBoolResult(scratch);
    masm.xor32(Imm32(1), scratch);

    masm.PopRegsInMask(volatileRegs);

    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  }

  masm.bind(&done);
  return true;
}
   8612 
   8613 bool CacheIRCompiler::emitLoadBigIntTruthyResult(BigIntOperandId bigIntId) {
   8614  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8615  AutoOutputRegister output(*this);
   8616  Register bigInt = allocator.useRegister(masm, bigIntId);
   8617 
   8618  Label ifFalse, done;
   8619  masm.branch32(Assembler::Equal,
   8620                Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(0),
   8621                &ifFalse);
   8622  masm.moveValue(BooleanValue(true), output.valueReg());
   8623  masm.jump(&done);
   8624 
   8625  masm.bind(&ifFalse);
   8626  masm.moveValue(BooleanValue(false), output.valueReg());
   8627 
   8628  masm.bind(&done);
   8629  return true;
   8630 }
   8631 
bool CacheIRCompiler::emitLoadValueTruthyResult(ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Converts an arbitrary Value to a boolean by testing each type tag in
  // turn: undefined/null are falsy; boolean, int32, string, and bigint use
  // their own truthiness tests; symbols are truthy; objects are truthy
  // unless they emulate undefined; anything left must be a double (asserted
  // in DEBUG builds).
  AutoOutputRegister output(*this);
  ValueOperand value = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchFloatRegister floatReg(this);

  Label ifFalse, ifTrue, done;

  {
    ScratchTagScope tag(masm, value);
    masm.splitTagForTest(value, tag);

    masm.branchTestUndefined(Assembler::Equal, tag, &ifFalse);
    masm.branchTestNull(Assembler::Equal, tag, &ifFalse);

    Label notBoolean;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    {
      // Release the tag scratch while the payload is inspected.
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBooleanTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notBoolean);

    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestInt32Truthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notInt32);

    Label notObject;
    masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
    {
      ScratchTagScopeRelease _(&tag);

      Register obj = masm.extractObject(value, scratch1);

      // Inline emulates-undefined check, with an ABI call for objects the
      // inline check cannot decide.
      Label slowPath;
      masm.branchIfObjectEmulatesUndefined(obj, scratch2, &slowPath, &ifFalse);
      masm.jump(&ifTrue);

      masm.bind(&slowPath);
      {
        LiveRegisterSet volatileRegs = liveVolatileRegs();
        volatileRegs.takeUnchecked(scratch1);
        volatileRegs.takeUnchecked(scratch2);
        volatileRegs.takeUnchecked(output);
        masm.PushRegsInMask(volatileRegs);

        using Fn = bool (*)(JSObject* obj);
        masm.setupUnalignedABICall(scratch2);
        masm.passABIArg(obj);
        masm.callWithABI<Fn, js::EmulatesUndefined>();
        masm.storeCallPointerResult(scratch2);

        masm.PopRegsInMask(volatileRegs);

        // EmulatesUndefined returning true means the value is falsy.
        masm.branchIfTrueBool(scratch2, &ifFalse);
        masm.jump(&ifTrue);
      }
    }
    masm.bind(&notObject);

    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestStringTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notString);

    Label notBigInt;
    masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
    {
      ScratchTagScopeRelease _(&tag);
      masm.branchTestBigIntTruthy(false, value, &ifFalse);
      masm.jump(&ifTrue);
    }
    masm.bind(&notBigInt);

    masm.branchTestSymbol(Assembler::Equal, tag, &ifTrue);

#ifdef DEBUG
    Label isDouble;
    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.assumeUnreachable("Unexpected value type");
    masm.bind(&isDouble);
#endif

    {
      ScratchTagScopeRelease _(&tag);
      masm.unboxDouble(value, floatReg);
      masm.branchTestDoubleTruthy(false, floatReg, &ifFalse);
    }

    // Fall through to true case.
  }

  masm.bind(&ifTrue);
  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&ifFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}
   8747 
   8748 bool CacheIRCompiler::emitComparePointerResultShared(JSOp op,
   8749                                                     TypedOperandId lhsId,
   8750                                                     TypedOperandId rhsId) {
   8751  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8752  AutoOutputRegister output(*this);
   8753 
   8754  Register left = allocator.useRegister(masm, lhsId);
   8755  Register right = allocator.useRegister(masm, rhsId);
   8756 
   8757  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   8758 
   8759  Label ifTrue, done;
   8760  masm.branchPtr(JSOpToCondition(op, /* signed = */ true), left, right,
   8761                 &ifTrue);
   8762 
   8763  EmitStoreBoolean(masm, false, output);
   8764  masm.jump(&done);
   8765 
   8766  masm.bind(&ifTrue);
   8767  EmitStoreBoolean(masm, true, output);
   8768  masm.bind(&done);
   8769  return true;
   8770 }
   8771 
bool CacheIRCompiler::emitCompareObjectResult(JSOp op, ObjOperandId lhsId,
                                              ObjOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Object comparisons are pointer comparisons; defer to the shared helper.
  return emitComparePointerResultShared(op, lhsId, rhsId);
}
   8777 
bool CacheIRCompiler::emitCompareSymbolResult(JSOp op, SymbolOperandId lhsId,
                                              SymbolOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  // Symbol comparisons are pointer comparisons; defer to the shared helper.
  return emitComparePointerResultShared(op, lhsId, rhsId);
}
   8783 
   8784 bool CacheIRCompiler::emitCompareInt32Result(JSOp op, Int32OperandId lhsId,
   8785                                             Int32OperandId rhsId) {
   8786  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   8787  AutoOutputRegister output(*this);
   8788  Register left = allocator.useRegister(masm, lhsId);
   8789  Register right = allocator.useRegister(masm, rhsId);
   8790 
   8791  Label ifTrue, done;
   8792  masm.branch32(JSOpToCondition(op, /* signed = */ true), left, right, &ifTrue);
   8793 
   8794  EmitStoreBoolean(masm, false, output);
   8795  masm.jump(&done);
   8796 
   8797  masm.bind(&ifTrue);
   8798  EmitStoreBoolean(masm, true, output);
   8799  masm.bind(&done);
   8800  return true;
   8801 }
   8802 
// Relational/equality comparison of two number operands as doubles.
bool CacheIRCompiler::emitCompareDoubleResult(JSOp op, NumberOperandId lhsId,
                                              NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  // Two float scratch registers to hold the unboxed operands.
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);

  // NOTE(review): |failure| is not referenced below; it looks vestigial
  // here, but keep it — addFailurePath registration may matter. Confirm
  // before removing.
  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load/convert both number operands into double registers.
  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  Label done, ifTrue;
  // JSOpToDoubleCondition maps |op| to the matching double condition code.
  masm.branchDouble(JSOpToDoubleCondition(op), floatScratch0, floatScratch1,
                    &ifTrue);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);
  masm.bind(&done);
  return true;
}
   8830 
// Compare two BigInt operands by calling a pure C++ helper over the ABI.
bool CacheIRCompiler::emitCompareBigIntResult(JSOp op, BigIntOperandId lhsId,
                                              BigIntOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // The ABI call may clobber volatile registers; save them around it.
  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(rhs);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
  }

  // Only four helpers are needed: the operand swap above covers Le/Gt, and
  // loose and strict equality map to the same helper for two BigInts.
  using Fn = bool (*)(BigInt*, BigInt*);
  Fn fn;
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    fn = jit::BigIntEqual<EqualityKind::Equal>;
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    fn = jit::BigIntEqual<EqualityKind::NotEqual>;
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    fn = jit::BigIntCompare<ComparisonKind::LessThan>;
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    fn = jit::BigIntCompare<ComparisonKind::GreaterThanOrEqual>;
  }

  masm.callWithABI(DynamicFunction<Fn>(fn));
  masm.storeCallBoolResult(scratch);

  // Restore volatile registers, keeping the call result live in |scratch|.
  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}
   8880 
// Compare a BigInt operand against an int32 operand, fully inline (no call).
bool CacheIRCompiler::emitCompareBigIntInt32Result(JSOp op,
                                                   BigIntOperandId lhsId,
                                                   Int32OperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  Register bigInt = allocator.useRegister(masm, lhsId);
  Register int32 = allocator.useRegister(masm, rhsId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  // compareBigIntAndInt32 jumps to exactly one of the two labels.
  Label ifTrue, ifFalse;
  masm.compareBigIntAndInt32(op, bigInt, int32, scratch1, scratch2, &ifTrue,
                             &ifFalse);

  Label done;
  masm.bind(&ifFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&ifTrue);
  EmitStoreBoolean(masm, true, output);

  masm.bind(&done);
  return true;
}
   8907 
// Compare a BigInt operand against a number operand via a pure ABI helper.
// For the swapped ops (Le/Gt) the number-first helper variant is used so
// the operand order matches the argument order pushed below.
bool CacheIRCompiler::emitCompareBigIntNumberResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);

  Register lhs = allocator.useRegister(masm, lhsId);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch0);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // The ABI call may clobber volatile registers; save them around it.
  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  masm.setupUnalignedABICall(scratch);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.passABIArg(floatScratch0, ABIType::Float64);
    masm.passABIArg(lhs);
  } else {
    masm.passABIArg(lhs);
    masm.passABIArg(floatScratch0, ABIType::Float64);
  }

  // Pick the helper whose parameter order matches the argument order above.
  using FnBigIntNumber = bool (*)(BigInt*, double);
  using FnNumberBigInt = bool (*)(double, BigInt*);
  switch (op) {
    case JSOp::Eq: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::Equal>>();
      break;
    }
    case JSOp::Ne: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberEqual<EqualityKind::NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      masm.callWithABI<FnBigIntNumber,
                       jit::BigIntNumberCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Gt: {
      masm.callWithABI<FnNumberBigInt,
                       jit::NumberBigIntCompare<ComparisonKind::LessThan>>();
      break;
    }
    case JSOp::Le: {
      masm.callWithABI<
          FnNumberBigInt,
          jit::NumberBigIntCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      masm.callWithABI<
          FnBigIntNumber,
          jit::BigIntNumberCompare<ComparisonKind::GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }

  masm.storeCallBoolResult(scratch);

  // Restore volatile registers, keeping the call result live in |scratch|.
  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
  return true;
}
   8985 
// Compare a BigInt operand against a string operand. String-to-BigInt
// conversion can allocate/fail, so this goes through a VM call (AutoCallVM
// handles pushing the frame and storing the result into the IC output).
bool CacheIRCompiler::emitCompareBigIntStringResult(JSOp op,
                                                    BigIntOperandId lhsId,
                                                    StringOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  callvm.prepare();

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(lhs);
    masm.Push(rhs);
  } else {
    masm.Push(rhs);
    masm.Push(lhs);
  }

  // Two VM-function shapes, matching the operand order pushed above.
  using FnBigIntString =
      bool (*)(JSContext*, HandleBigInt, HandleString, bool*);
  using FnStringBigInt =
      bool (*)(JSContext*, HandleString, HandleBigInt, bool*);

  switch (op) {
    case JSOp::Eq: {
      constexpr auto Equal = EqualityKind::Equal;
      callvm.call<FnBigIntString, BigIntStringEqual<Equal>>();
      break;
    }
    case JSOp::Ne: {
      constexpr auto NotEqual = EqualityKind::NotEqual;
      callvm.call<FnBigIntString, BigIntStringEqual<NotEqual>>();
      break;
    }
    case JSOp::Lt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnBigIntString, BigIntStringCompare<LessThan>>();
      break;
    }
    case JSOp::Gt: {
      constexpr auto LessThan = ComparisonKind::LessThan;
      callvm.call<FnStringBigInt, StringBigIntCompare<LessThan>>();
      break;
    }
    case JSOp::Le: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnStringBigInt, StringBigIntCompare<GreaterThanOrEqual>>();
      break;
    }
    case JSOp::Ge: {
      constexpr auto GreaterThanOrEqual = ComparisonKind::GreaterThanOrEqual;
      callvm.call<FnBigIntString, BigIntStringCompare<GreaterThanOrEqual>>();
      break;
    }
    default:
      MOZ_CRASH("unhandled op");
  }
  return true;
}
   9049 
// Compare a value against null or undefined (|isUndefined| selects which).
// Strict equality is a pure type test; loose equality also treats the other
// of null/undefined as equal and consults objects that emulate undefined.
bool CacheIRCompiler::emitCompareNullUndefinedResult(JSOp op, bool isUndefined,
                                                     ValOperandId inputId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  ValueOperand input = allocator.useValueRegister(masm, inputId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  // Strict (=== / !==): just test the value's type tag.
  if (IsStrictEqualityOp(op)) {
    if (isUndefined) {
      masm.testUndefinedSet(JSOpToCondition(op, false), input, scratch);
    } else {
      masm.testNullSet(JSOpToCondition(op, false), input, scratch);
    }
    EmitStoreResult(masm, scratch, JSVAL_TYPE_BOOLEAN, output);
    return true;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  MOZ_ASSERT(IsLooseEqualityOp(op));

  Label nullOrLikeUndefined, notNullOrLikeUndefined, done;
  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    // Loose equality: null and undefined compare equal to each other, so
    // test both tags. The more likely tag is tested first.
    if (isUndefined) {
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
    } else {
      masm.branchTestNull(Assembler::Equal, tag, &nullOrLikeUndefined);
      masm.branchTestUndefined(Assembler::Equal, tag, &nullOrLikeUndefined);
    }
    // Any other non-object type is simply not equal.
    masm.branchTestObject(Assembler::NotEqual, tag, &notNullOrLikeUndefined);

    {
      ScratchTagScopeRelease _(&tag);

      // Objects may emulate undefined (e.g. document.all). This check can
      // also bail to the failure path for objects it cannot decide inline.
      masm.unboxObject(input, scratch);
      masm.branchIfObjectEmulatesUndefined(scratch, scratch2, failure->label(),
                                           &nullOrLikeUndefined);
      masm.jump(&notNullOrLikeUndefined);
    }
  }

  masm.bind(&nullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Eq, output);
  masm.jump(&done);

  masm.bind(&notNullOrLikeUndefined);
  EmitStoreBoolean(masm, op == JSOp::Ne, output);

  masm.bind(&done);
  return true;
}
   9110 
// SameValue comparison of two number operands, delegated to
// MacroAssembler::sameValueDouble; the boolean result is boxed into the
// output value register.
bool CacheIRCompiler::emitCompareDoubleSameValueResult(NumberOperandId lhsId,
                                                       NumberOperandId rhsId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
  AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
  AutoAvailableFloatRegister floatScratch2(*this, FloatReg2);

  allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
  allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);

  masm.sameValueDouble(floatScratch0, floatScratch1, floatScratch2, scratch);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
   9128 
   9129 bool CacheIRCompiler::emitIndirectTruncateInt32Result(Int32OperandId valId) {
   9130  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   9131  AutoOutputRegister output(*this);
   9132  Register val = allocator.useRegister(masm, valId);
   9133 
   9134  if (output.hasValue()) {
   9135    masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
   9136  } else {
   9137    masm.mov(val, output.typedReg().gpr());
   9138  }
   9139  return true;
   9140 }
   9141 
// Debugging aid: emit code that prints |str| at runtime.
bool CacheIRCompiler::emitCallPrintString(const char* str) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.printf(str);
  return true;
}
   9147 
// Debugging aid: emit a hardware breakpoint instruction.
bool CacheIRCompiler::emitBreakpoint() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  masm.breakpoint();
  return true;
}
   9153 
// Emit a generational-GC post-write barrier for storing |val| into |obj|.
// The barrier is skipped whenever it cannot be required (constant non-nursery
// values, typed values needing no barrier, values not in the nursery, or
// |obj| itself in the nursery); otherwise PostWriteBarrier or, when
// |maybeIndex| is valid, PostWriteElementBarrier is called over the ABI.
void CacheIRCompiler::emitPostBarrierShared(Register obj,
                                            const ConstantOrRegister& val,
                                            Register scratch,
                                            Register maybeIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Constants: GC things baked into a stub must be tenured, so no barrier.
  if (val.constant()) {
    MOZ_ASSERT_IF(val.value().isGCThing(),
                  !IsInsideNursery(val.value().toGCThing()));
    return;
  }

  // Statically-typed values that can't be nursery cells need no barrier.
  TypedOrValueRegister reg = val.reg();
  if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
    return;
  }

  Label skipBarrier;
  // No barrier needed if the stored value is not a nursery cell...
  if (reg.hasValue()) {
    masm.branchValueIsNurseryCell(Assembler::NotEqual, reg.valueReg(), scratch,
                                  &skipBarrier);
  } else {
    masm.branchPtrInNurseryChunk(Assembler::NotEqual, reg.typedReg().gpr(),
                                 scratch, &skipBarrier);
  }
  // ...or if |obj| is itself in the nursery (no tenured->nursery edge).
  masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);

  // Check one element cache to avoid VM call.
  auto* lastCellAddr = cx_->runtime()->gc.addressOfLastBufferedWholeCell();
  masm.branchPtr(Assembler::Equal, AbsoluteAddress(lastCellAddr), obj,
                 &skipBarrier);

  // Call one of these, depending on maybeIndex:
  //
  //   void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
  //   void PostWriteElementBarrier(JSRuntime* rt, JSObject* obj,
  //                                int32_t index);
  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);
  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(cx_->runtime()), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  if (maybeIndex != InvalidReg) {
    masm.passABIArg(maybeIndex);
    using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
    masm.callWithABI<Fn, PostWriteElementBarrier>();
  } else {
    using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
    masm.callWithABI<Fn, PostWriteBarrier>();
  }
  masm.PopRegsInMask(save);

  masm.bind(&skipBarrier);
}
   9209 
// Wrap an object result into the current compartment via WrapObjectPure,
// bailing to the failure path when no wrapper can be produced.
bool CacheIRCompiler::emitWrapResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  // We only have to wrap objects, because we are in the same zone.
  masm.branchTestObject(Assembler::NotEqual, output.valueReg(), &done);

  Register obj = output.valueReg().scratchReg();
  masm.unboxObject(output.valueReg(), obj);

  // The ABI call may clobber volatile registers; save them around it.
  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  using Fn = JSObject* (*)(JSContext * cx, JSObject * obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, WrapObjectPure>();
  masm.storeCallPointerResult(obj);

  // Restore volatile registers, keeping the (possibly wrapped) object live.
  LiveRegisterSet ignore;
  ignore.add(obj);
  masm.PopRegsInMaskIgnore(save, ignore);

  // We could not get a wrapper for this object.
  masm.branchTestPtr(Assembler::Zero, obj, obj, failure->label());

  // We clobbered the output register, so we have to retag.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());

  masm.bind(&done);
  return true;
}
   9251 
// Megamorphic property load keyed by a value id. Tries the inline
// megamorphic cache lookup first (except on x86), then falls back to a pure
// (VM-frame-free) C++ helper; bails to the failure path if the helper fails.
bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult(ObjOperandId objId,
                                                           ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

#ifdef JS_CODEGEN_X86
  // x86: not enough scratch registers for the inline cache lookup; reuse the
  // output registers as scratch instead.
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifdef JS_CODEGEN_X86
  // No inline cache lookup on x86: pass a null cache entry to the helper.
  masm.xorPtr(scratch2, scratch2);
#else
  // Fast path: on a cache hit the value is loaded directly into the output
  // and we jump to |cacheHit|. On a miss, scratch2 holds the cache entry to
  // pass to the fallback helper.
  Label cacheHit;
  masm.emitMegamorphicCacheLookupByValue(
      idVal, obj, scratch1, scratch3, scratch2, output.valueReg(), &cacheHit);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(scratch2);
  masm.passABIArg(idVal.scratchReg());
  masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();

  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  // On helper failure: unwind the reserved stack slot and bail.
  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  // On success: the result Value is on top of the stack; load and pop it.
  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif
  return true;
}
   9325 
// Permissive variant of the megamorphic load-by-value: after a cache miss it
// performs a full VM call (GetElemMaybeCached) rather than a pure helper, so
// it has no failure path of its own.
bool CacheIRCompiler::emitMegamorphicLoadSlotByValuePermissiveResult(
    ObjOperandId objId, ValOperandId idId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  const AutoOutputRegister& output = callvm.output();

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

#ifdef JS_CODEGEN_X86
  // x86: not enough scratch registers for the inline cache lookup.
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

#ifdef JS_CODEGEN_X86
  // No inline cache lookup on x86: pass a null cache entry to the VM call.
  masm.xorPtr(scratch2, scratch2);
#else
  // Fast path: cache hit loads the value into the output directly. On a
  // miss, scratch2 holds the cache entry to hand to the VM call.
  Label cacheHit;
  masm.emitMegamorphicCacheLookupByValue(
      idVal, obj, scratch1, scratch3, scratch2, output.valueReg(), &cacheHit);
#endif

  callvm.prepare();

  masm.Push(scratch2);
  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue,
                      MegamorphicCacheEntry*, MutableHandleValue);
  callvm.call<Fn, GetElemMaybeCached>();

#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif
  return true;
}
   9369 
// Megamorphic |in| / hasOwnProperty check. Tries the inline megamorphic
// cache lookup first (except on x86), then falls back to a pure C++ helper;
// |hasOwn| selects own-property-only semantics.
bool CacheIRCompiler::emitMegamorphicHasPropResult(ObjOperandId objId,
                                                   ValOperandId idId,
                                                   bool hasOwn) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

#ifdef JS_CODEGEN_X86
  // x86: not enough scratch registers for the inline cache lookup.
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
#else
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifndef JS_CODEGEN_X86
  // Fast path: on a hit the (untagged) boolean lands in the output register
  // and we jump to |cacheHit|; it is tagged below when needed.
  Label cacheHit, done;
  masm.emitMegamorphicCacheLookupExists(idVal, obj, scratch1, scratch3,
                                        scratch2, output.maybeReg(), &cacheHit,
                                        hasOwn);
#else
  // No inline cache lookup on x86: pass a null cache entry to the helper.
  masm.xorPtr(scratch2, scratch2);
#endif

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // idVal will be in vp[0], result will be stored in vp[1].
  masm.reserveStack(sizeof(Value));
  masm.Push(idVal);
  masm.moveStackPtrTo(idVal.scratchReg());

  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(idVal);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(scratch2);
  masm.passABIArg(idVal.scratchReg());
  if (hasOwn) {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
  } else {
    masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
  }
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.Pop(idVal);

  // On helper failure: unwind the reserved stack slot and bail.
  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  // On success: the result Value is on top of the stack; load and pop it.
  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

#ifndef JS_CODEGEN_X86
  masm.jump(&done);
  // Cache-hit path: the raw boolean still needs boxing for value outputs.
  masm.bind(&cacheHit);
  if (output.hasValue()) {
    masm.tagValue(JSVAL_TYPE_BOOLEAN, output.valueReg().scratchReg(),
                  output.valueReg());
  }
  masm.bind(&done);
#endif
  return true;
}
   9454 
   9455 bool CacheIRCompiler::emitSmallObjectVariableKeyHasOwnResult(
   9456    StringOperandId idId, uint32_t propNamesOffset, uint32_t shapeOffset) {
   9457  StubFieldOffset propNames(propNamesOffset, StubField::Type::JSObject);
   9458  AutoOutputRegister output(*this);
   9459  Register id = allocator.useRegister(masm, idId);
   9460  AutoScratchRegisterMaybeOutput propNamesReg(allocator, masm, output);
   9461  AutoScratchRegister endScratch(allocator, masm);
   9462  AutoScratchRegister scratch(allocator, masm);
   9463 
   9464  FailurePath* failure;
   9465  if (!addFailurePath(&failure)) {
   9466    return false;
   9467  }
   9468 
   9469  emitLoadStubField(propNames, propNamesReg);
   9470 
   9471  Label trueResult, falseResult, loop, done;
   9472 
   9473  masm.loadPtr(Address(propNamesReg, NativeObject::offsetOfElements()),
   9474               propNamesReg);
   9475  // Compute end pointer.
   9476  Address lengthAddr(propNamesReg, ObjectElements::offsetOfInitializedLength());
   9477  masm.load32(lengthAddr, endScratch);
   9478  masm.branch32(Assembler::Equal, endScratch, Imm32(0), &falseResult);
   9479  BaseObjectElementIndex endPtrAddr(propNamesReg, endScratch);
   9480  masm.computeEffectiveAddress(endPtrAddr, endScratch);
   9481 
   9482  masm.bind(&loop);
   9483 
   9484  Address atomAddr(propNamesReg.get(), 0);
   9485 
   9486  masm.unboxString(atomAddr, scratch);
   9487  masm.branchPtr(Assembler::Equal, scratch, id, &trueResult);
   9488 
   9489  masm.addPtr(Imm32(sizeof(Value)), propNamesReg);
   9490  masm.branchPtr(Assembler::Below, propNamesReg, endScratch, &loop);
   9491 
   9492  masm.bind(&falseResult);
   9493  if (output.hasValue()) {
   9494    masm.moveValue(BooleanValue(false), output.valueReg());
   9495  } else {
   9496    masm.move32(Imm32(0), output.typedReg().gpr());
   9497  }
   9498  masm.jump(&done);
   9499  masm.bind(&trueResult);
   9500  if (output.hasValue()) {
   9501    masm.moveValue(BooleanValue(true), output.valueReg());
   9502  } else {
   9503    masm.move32(Imm32(1), output.typedReg().gpr());
   9504  }
   9505  masm.bind(&done);
   9506  return true;
   9507 }
   9508 
// Check whether a native object has a sparse element at |index| via the pure
// HasNativeElementPure helper; bails to the failure path if the helper fails.
bool CacheIRCompiler::emitCallObjectHasSparseElementResult(
    ObjOperandId objId, Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Reserve a stack slot: the helper writes the result Value through vp.
  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(scratch2.get());

  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(scratch1);
  masm.PushRegsInMask(volatileRegs);

  using Fn =
      bool (*)(JSContext* cx, NativeObject* obj, int32_t index, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.passABIArg(index);
  masm.passABIArg(scratch2);
  masm.callWithABI<Fn, HasNativeElementPure>();
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  // On helper failure: unwind the reserved stack slot and bail.
  Label ok;
  uint32_t framePushed = masm.framePushed();
  masm.branchIfTrueBool(scratch1, &ok);
  masm.adjustStack(sizeof(Value));
  masm.jump(failure->label());

  // On success: the result Value is on top of the stack; load and pop it.
  masm.bind(&ok);
  masm.setFramePushed(framePushed);
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));
  return true;
}
   9556 
   9557 /*
   9558 * Move a constant value into register dest.
   9559 */
// Load a stub field into |dest| as a baked-in immediate constant. Only valid
// in Ion mode (asserted), where ICs are not shared and constants are safe.
void CacheIRCompiler::emitLoadStubFieldConstant(StubFieldOffset val,
                                                Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  MOZ_ASSERT(mode_ == Mode::Ion);
  switch (val.getStubFieldType()) {
    case StubField::Type::Shape:
      masm.movePtr(ImmGCPtr(shapeStubField(val.getOffset())), dest);
      break;
    case StubField::Type::String:
      masm.movePtr(ImmGCPtr(stringStubField(val.getOffset())), dest);
      break;
    case StubField::Type::JSObject:
      masm.movePtr(ImmGCPtr(objectStubField(val.getOffset())), dest);
      break;
    case StubField::Type::WeakObject:
      masm.movePtr(ImmGCPtr(weakObjectStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawPointer:
      masm.movePtr(ImmPtr(pointerStubField(val.getOffset())), dest);
      break;
    case StubField::Type::RawInt32:
      masm.move32(Imm32(int32StubField(val.getOffset())), dest);
      break;
    case StubField::Type::Id:
      masm.movePropertyKey(idStubField(val.getOffset()), dest);
      break;
    default:
      MOZ_CRASH("Unhandled stub field constant type");
  }
}
   9590 
   9591 /*
   9592 * After this is done executing, dest contains the value; either through a
   9593 * constant load or through the load from the stub data.
   9594 *
   9595 * The current policy is that Baseline will use loads from the stub data (to
 * allow IC sharing), whereas Ion doesn't share ICs, and so we can safely use
   9597 * constants in the IC.
   9598 */
// Load a stub field into |dest|: as an immediate constant (Ion) or as a load
// from the stub data (Baseline), per the policy described above.
void CacheIRCompiler::emitLoadStubField(StubFieldOffset val, Register dest) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    emitLoadStubFieldConstant(val, dest);
  } else {
    // Address of the field inside the stub's data section.
    Address load(ICStubReg, stubDataOffset_ + val.getOffset());

    switch (val.getStubFieldType()) {
      // All pointer-sized field types use a plain pointer load.
      case StubField::Type::RawPointer:
      case StubField::Type::Shape:
      case StubField::Type::JSObject:
      case StubField::Type::WeakObject:
      case StubField::Type::Symbol:
      case StubField::Type::String:
      case StubField::Type::Id:
      case StubField::Type::AllocSite:
        masm.loadPtr(load, dest);
        break;
      case StubField::Type::RawInt32:
        masm.load32(load, dest);
        break;
      default:
        MOZ_CRASH("Unhandled stub field constant type");
    }
  }
}
   9625 
// Load a Value-typed stub field into |dest|: as an immediate constant (Ion)
// or as a value load from the stub data (Baseline).
void CacheIRCompiler::emitLoadValueStubField(StubFieldOffset val,
                                             ValueOperand dest) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Value ||
             val.getStubFieldType() == StubField::Type::WeakValue);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    MOZ_ASSERT(mode_ == Mode::Ion);
    if (val.getStubFieldType() == StubField::Type::Value) {
      masm.moveValue(valueStubField(val.getOffset()), dest);
    } else {
      masm.moveValue(weakValueStubField(val.getOffset()), dest);
    }
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadValue(addr, dest);
  }
}
   9643 
// Load a Double stub field and box it into |dest| as a DoubleValue. In the
// non-constant case the raw double is loaded through |scratch| and then boxed.
void CacheIRCompiler::emitLoadDoubleValueStubField(StubFieldOffset val,
                                                   ValueOperand dest,
                                                   FloatRegister scratch) {
  MOZ_ASSERT(val.getStubFieldType() == StubField::Type::Double);

  if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
    // Only Ion bakes stub-field values into the code as constants.
    MOZ_ASSERT(mode_ == Mode::Ion);
    double d = doubleStubField(val.getOffset());
    masm.moveValue(DoubleValue(d), dest);
  } else {
    Address addr(ICStubReg, stubDataOffset_ + val.getOffset());
    masm.loadDouble(addr, scratch);
    masm.boxDouble(scratch, dest, scratch);
  }
}
   9659 
// Emit |lhs instanceof proto| by walking lhs's prototype chain inline.
// Returns true/false in the output register; bails to the failure path when a
// lazy proto is encountered (the VM must resolve it).
bool CacheIRCompiler::emitLoadInstanceOfObjectResult(ValOperandId lhsId,
                                                     ObjOperandId protoId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
  Register proto = allocator.useRegister(masm, protoId);

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label returnFalse, returnTrue, done;
  // Non-object LHS is never an instance of anything.
  masm.fallibleUnboxObject(lhs, scratch, &returnFalse);

  // LHS is an object. Load its proto.
  masm.loadObjProto(scratch, scratch);
  {
    // Walk the proto chain until we either reach the target object,
    // nullptr or LazyProto.
    Label loop;
    masm.bind(&loop);

    masm.branchPtr(Assembler::Equal, scratch, proto, &returnTrue);
    masm.branchTestPtr(Assembler::Zero, scratch, scratch, &returnFalse);

    // LazyProto is encoded as the tagged pointer value 1; it requires a VM
    // call to resolve, so fail out of the IC.
    MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
    masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), failure->label());

    masm.loadObjProto(scratch, scratch);
    masm.jump(&loop);
  }

  masm.bind(&returnFalse);
  EmitStoreBoolean(masm, false, output);
  masm.jump(&done);

  masm.bind(&returnTrue);
  EmitStoreBoolean(masm, true, output);
  // fallthrough
  masm.bind(&done);
  return true;
}
   9705 
// Megamorphic property get by id: first probe the megamorphic cache inline
// (except on x86, which lacks enough registers), then fall back to a pure
// (non-GC'ing) C++ helper. Fails to the next stub if the helper reports false.
bool CacheIRCompiler::emitMegamorphicLoadSlotResult(ObjOperandId objId,
                                                    uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset id(idOffset, StubField::Type::Id);

  AutoScratchRegisterMaybeOutput idReg(allocator, masm, output);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);

#ifdef JS_CODEGEN_X86
  // Not enough registers on x86 for the inline cache lookup; pass a null
  // cache entry pointer to the (cache-filling) fallback helper instead.
  masm.xorPtr(scratch3, scratch3);
#else
  Label cacheHit;
  emitLoadStubField(id, idReg);
  // On a hit this writes the property value to output.valueReg() and jumps
  // to cacheHit; on a miss scratch3 holds the cache entry to populate.
  masm.emitMegamorphicCacheLookupByValue(idReg.get(), obj, scratch1, scratch2,
                                         scratch3, output.valueReg(),
                                         &cacheHit);
#endif

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchIfNonNativeObj(obj, scratch1, failure->label());

  // Reserve a Value slot on the stack for the result; idReg doubles as the
  // |vp| out-pointer for the call below.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(idReg.get());

  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  volatileRegs.takeUnchecked(scratch3);
  volatileRegs.takeUnchecked(idReg);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
                      MegamorphicCache::Entry* cacheEntry, Value* vp);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  masm.passABIArg(scratch3);
  masm.passABIArg(idReg);

#ifdef JS_CODEGEN_X86
  // x86 skipped the inline probe, so the helper does the cache lookup too.
  masm.callWithABI<Fn, GetNativeDataPropertyPureWithCacheLookup>();
#else
  masm.callWithABI<Fn, GetNativeDataPropertyPure>();
#endif

  masm.storeCallPointerResult(scratch2);
  masm.PopRegsInMask(volatileRegs);

  // Pop the result Value reserved above into the output register.
  masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
  masm.adjustStack(sizeof(Value));

  masm.branchIfFalseBool(scratch2, failure->label());
#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif

  return true;
}
   9776 
// Like emitMegamorphicLoadSlotResult, but the slow path is a full VM call
// (GetPropMaybeCached) rather than a pure helper, so it can run getters and
// other side-effectful lookups instead of failing out of the IC.
bool CacheIRCompiler::emitMegamorphicLoadSlotPermissiveResult(
    ObjOperandId objId, uint32_t idOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  const AutoOutputRegister& output = callvm.output();

  Register obj = allocator.useRegister(masm, objId);
  StubFieldOffset id(idOffset, StubField::Type::Id);

  AutoScratchRegisterMaybeOutput idReg(allocator, masm, output);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegisterMaybeOutputType scratch3(allocator, masm, output);

#ifdef JS_CODEGEN_X86
  // Not enough registers on x86 for the inline cache probe; pass a null
  // cache entry to the VM call instead.
  masm.xorPtr(scratch3, scratch3);
#else
  Label cacheHit;
  emitLoadStubField(id, idReg);
  // On a hit this writes the value to output.valueReg() and jumps to
  // cacheHit; on a miss scratch3 holds the cache entry for the VM to fill.
  masm.emitMegamorphicCacheLookupByValue(idReg.get(), obj, scratch1, scratch2,
                                         scratch3, output.valueReg(),
                                         &cacheHit);
#endif

  callvm.prepare();

  // Arguments are pushed in reverse order of the Fn signature.
  emitLoadStubField(id, scratch2);
  masm.Push(scratch3);
  masm.Push(scratch2);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId,
                      MegamorphicCacheEntry*, MutableHandleValue);
  callvm.call<Fn, GetPropMaybeCached>();

#ifndef JS_CODEGEN_X86
  masm.bind(&cacheHit);
#endif

  return true;
}
   9819 
// Megamorphic property set by id: always a VM call to SetPropertyMegamorphic.
// |strict| selects strict-mode error semantics and is passed at runtime.
bool CacheIRCompiler::emitMegamorphicStoreSlot(ObjOperandId objId,
                                               uint32_t idOffset,
                                               ValOperandId rhsId,
                                               bool strict) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ConstantOrRegister val = allocator.useConstantOrRegister(masm, rhsId);
  StubFieldOffset id(idOffset, StubField::Type::Id);
  AutoScratchRegister scratch(allocator, masm);

  callvm.prepare();

  // Arguments are pushed in reverse order of the Fn signature.
  masm.Push(Imm32(strict));
  masm.Push(val);
  emitLoadStubField(id, scratch);
  masm.Push(scratch);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
  callvm.callNoResult<Fn, SetPropertyMegamorphic<false>>();
  return true;
}
   9845 
// Unbox a GetterSetter* from a PrivateGCThing Value and extract its getter or
// setter object. Fails if the accessor slot is null, or (when
// |needsClassGuard| is set) if the accessor is not a JSFunction.
bool CacheIRCompiler::emitLoadGetterSetterFunction(ValOperandId getterSetterId,
                                                   bool isGetter,
                                                   bool needsClassGuard,
                                                   ObjOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  ValueOperand getterSetter = allocator.useValueRegister(masm, getterSetterId);
  Register output = allocator.defineRegister(masm, resultId);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The GetterSetter* is stored as a PrivateGCThing Value.
  masm.unboxNonDouble(getterSetter, output, JSVAL_TYPE_PRIVATE_GCTHING);

  size_t offset = isGetter ? GetterSetter::offsetOfGetter()
                           : GetterSetter::offsetOfSetter();
  masm.loadPtr(Address(output, offset), output);

  // A null accessor means the property has no getter/setter of this kind.
  masm.branchTestPtr(Assembler::Zero, output, output, failure->label());
  if (needsClassGuard) {
    masm.branchTestObjIsFunction(Assembler::NotEqual, output, scratch, output,
                                 failure->label());
  }

  return true;
}
   9875 
// Guard that |obj| still has the expected GetterSetter for |id|, by calling
// the pure (non-GC'ing) helper ObjectHasGetterSetterPure. Fails the IC if the
// helper returns false.
bool CacheIRCompiler::emitGuardHasGetterSetter(ObjOperandId objId,
                                               uint32_t idOffset,
                                               uint32_t getterSetterOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  StubFieldOffset id(idOffset, StubField::Type::Id);
  StubFieldOffset getterSetter(getterSetterOffset, StubField::Type::WeakValue);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchValueRegister scratch3(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  volatileRegs.takeUnchecked(scratch3);
  masm.PushRegsInMask(volatileRegs);

  // The GetterSetter* is stored as a PrivateGCThingValue.
  emitLoadValueStubField(getterSetter, scratch3);
  masm.unboxNonDouble(scratch3.get(), scratch3.get().scratchReg(),
                      JSVAL_TYPE_PRIVATE_GCTHING);

  using Fn = bool (*)(JSContext* cx, JSObject* obj, jsid id,
                      GetterSetter* getterSetter);
  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  emitLoadStubField(id, scratch2);
  masm.passABIArg(scratch2);
  masm.passABIArg(scratch3.get().scratchReg());
  masm.callWithABI<Fn, ObjectHasGetterSetterPure>();
  masm.storeCallPointerResult(scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.branchIfFalseBool(scratch1, failure->label());
  return true;
}
   9922 
// Guard that a JS value can be converted to the given Wasm argument type
// without bailing out in Warp code. Ref types accept any value; V128 is not
// allowed through this path at all.
bool CacheIRCompiler::emitGuardWasmArg(ValOperandId argId,
                                       wasm::ValType::Kind kind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // All values can be boxed as AnyRef.
  if (kind == wasm::ValType::Ref) {
    return true;
  }
  MOZ_ASSERT(kind != wasm::ValType::V128);

  ValueOperand arg = allocator.useValueRegister(masm, argId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Check that the argument can be converted to the Wasm type in Warp code
  // without bailing out.
  Label done;
  switch (kind) {
    case wasm::ValType::I32:
    case wasm::ValType::F32:
    case wasm::ValType::F64: {
      // Argument must be number, bool, or undefined.
      masm.branchTestNumber(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestUndefined(Assembler::NotEqual, arg, failure->label());
      break;
    }
    case wasm::ValType::I64: {
      // Argument must be bigint, bool, or string.
      masm.branchTestBigInt(Assembler::Equal, arg, &done);
      masm.branchTestBoolean(Assembler::Equal, arg, &done);
      masm.branchTestString(Assembler::NotEqual, arg, failure->label());
      break;
    }
    default:
      MOZ_CRASH("Unexpected kind");
  }
  masm.bind(&done);

  return true;
}
   9967 
// Guard that |obj|'s shape is one of the shapes stored in a ListObject stub
// field. Uses an extra Spectre scratch register only when mitigations are
// needed for this object guard.
bool CacheIRCompiler::emitGuardMultipleShapes(ObjOperandId objId,
                                              uint32_t shapesOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister shapes(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  // Only allocate the extra scratch register when mitigations are required.
  Register spectreScratch = InvalidReg;
  Maybe<AutoScratchRegister> maybeSpectreScratch;
  if (needSpectreMitigations) {
    maybeSpectreScratch.emplace(allocator, masm);
    spectreScratch = *maybeSpectreScratch;
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The stub field contains a ListObject. Load its elements.
  StubFieldOffset shapeArray(shapesOffset, StubField::Type::JSObject);
  emitLoadStubField(shapeArray, shapes);
  masm.loadPtr(Address(shapes, NativeObject::offsetOfElements()), shapes);

  masm.branchTestObjShapeList(obj, shapes, scratch, scratch2, spectreScratch,
                              failure->label());
  return true;
}
   9999 
// Like emitGuardMultipleShapes, but additionally writes the index of the
// matching shape into the |offsetId| result register.
bool CacheIRCompiler::emitGuardMultipleShapesToOffset(ObjOperandId objId,
                                                      uint32_t shapesOffset,
                                                      Int32OperandId offsetId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register obj = allocator.useRegister(masm, objId);
  Register offset = allocator.defineRegister(masm, offsetId);
  AutoScratchRegister shapes(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  // We can re-use the output (offset) as scratch spectre register,
  // since the output is only set after all branches.
  Register spectreScratch = needSpectreMitigations ? offset : InvalidReg;

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // The stub field contains a ListObject. Load its elements.
  StubFieldOffset shapeArray(shapesOffset, StubField::Type::JSObject);
  emitLoadStubField(shapeArray, shapes);
  masm.loadPtr(Address(shapes, NativeObject::offsetOfElements()), shapes);

  masm.branchTestObjShapeListSetOffset(obj, shapes, offset, scratch, scratch2,
                                       spectreScratch, failure->label());
  return true;
}
  10030 
  10031 bool CacheIRCompiler::emitLoadObject(ObjOperandId resultId,
  10032                                     uint32_t objOffset) {
  10033  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10034  Register reg = allocator.defineRegister(masm, resultId);
  10035  StubFieldOffset obj(objOffset, StubField::Type::JSObject);
  10036  emitLoadStubField(obj, reg);
  10037  return true;
  10038 }
  10039 
  10040 bool CacheIRCompiler::emitLoadProtoObject(ObjOperandId resultId,
  10041                                          uint32_t objOffset,
  10042                                          ObjOperandId receiverObjId) {
  10043  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10044  Register reg = allocator.defineRegister(masm, resultId);
  10045  StubFieldOffset obj(objOffset, StubField::Type::JSObject);
  10046  emitLoadStubField(obj, reg);
  10047  return true;
  10048 }
  10049 
  10050 bool CacheIRCompiler::emitLoadInt32Constant(uint32_t valOffset,
  10051                                            Int32OperandId resultId) {
  10052  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10053  Register reg = allocator.defineRegister(masm, resultId);
  10054  StubFieldOffset val(valOffset, StubField::Type::RawInt32);
  10055  emitLoadStubField(val, reg);
  10056  return true;
  10057 }
  10058 
  10059 bool CacheIRCompiler::emitLoadInt32AsIntPtrConstant(uint32_t valOffset,
  10060                                                    IntPtrOperandId resultId) {
  10061  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10062  Register reg = allocator.defineRegister(masm, resultId);
  10063  StubFieldOffset val(valOffset, StubField::Type::RawInt32);
  10064  emitLoadStubField(val, reg);
  10065  masm.move32SignExtendToPtr(reg, reg);
  10066  return true;
  10067 }
  10068 
  10069 bool CacheIRCompiler::emitLoadBooleanConstant(bool val,
  10070                                              BooleanOperandId resultId) {
  10071  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10072  Register reg = allocator.defineRegister(masm, resultId);
  10073  masm.move32(Imm32(val), reg);
  10074  return true;
  10075 }
  10076 
  10077 bool CacheIRCompiler::emitLoadDoubleConstant(uint32_t valOffset,
  10078                                             NumberOperandId resultId) {
  10079  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10080 
  10081  ValueOperand output = allocator.defineValueRegister(masm, resultId);
  10082  StubFieldOffset val(valOffset, StubField::Type::Double);
  10083 
  10084  AutoScratchFloatRegister floatReg(this);
  10085 
  10086  emitLoadDoubleValueStubField(val, output, floatReg);
  10087  return true;
  10088 }
  10089 
  10090 bool CacheIRCompiler::emitLoadUndefined(ValOperandId resultId) {
  10091  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10092 
  10093  ValueOperand reg = allocator.defineValueRegister(masm, resultId);
  10094  masm.moveValue(UndefinedValue(), reg);
  10095  return true;
  10096 }
  10097 
  10098 bool CacheIRCompiler::emitLoadConstantString(uint32_t strOffset,
  10099                                             StringOperandId resultId) {
  10100  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10101  Register reg = allocator.defineRegister(masm, resultId);
  10102  StubFieldOffset str(strOffset, StubField::Type::String);
  10103  emitLoadStubField(str, reg);
  10104  return true;
  10105 }
  10106 
// Convert an int32 to a string: fast path through the static-strings table,
// slow path through the pure (non-GC'ing) Int32ToStringPure helper. Fails the
// IC if the pure helper cannot allocate.
bool CacheIRCompiler::emitCallInt32ToString(Int32OperandId inputId,
                                            StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register input = allocator.useRegister(masm, inputId);
  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done, callVM;

  {
    // Fast path: small ints have interned static strings.
    masm.lookupStaticIntString(input, result, cx_->staticStrings(), &callVM);
    masm.jump(&done);
  }

  {
    masm.bind(&callVM);
    LiveRegisterSet volatileRegs = liveVolatileRegs();
    volatileRegs.takeUnchecked(result);
    masm.PushRegsInMask(volatileRegs);

    using Fn = JSLinearString* (*)(JSContext * cx, int32_t i);
    masm.setupUnalignedABICall(result);
    masm.loadJSContext(result);
    masm.passABIArg(result);
    masm.passABIArg(input);
    masm.callWithABI<Fn, js::Int32ToStringPure>();

    masm.storeCallPointerResult(result);
    masm.PopRegsInMask(volatileRegs);

    // The pure helper returns nullptr on allocation failure.
    masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr), failure->label());
  }

  masm.bind(&done);
  return true;
}
  10147 
// Convert a number (as double) to a string via the pure NumberToStringPure
// helper. Fails the IC (through the float-scratch failure path) if the helper
// cannot allocate.
bool CacheIRCompiler::emitCallNumberToString(NumberOperandId inputId,
                                             StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register result = allocator.defineRegister(masm, resultId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // ensureDoubleRegister can itself fail (non-number input), which is routed
  // through the scratch float register's failure handling.
  AutoScratchFloatRegister scratchFloat(this, failure);
  allocator.ensureDoubleRegister(masm, inputId, scratchFloat);

  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(result);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSString* (*)(JSContext * cx, double d);
  masm.setupUnalignedABICall(result);
  masm.loadJSContext(result);
  masm.passABIArg(result);
  masm.passABIArg(scratchFloat, ABIType::Float64);
  masm.callWithABI<Fn, js::NumberToStringPure>();

  masm.storeCallPointerResult(result);
  masm.PopRegsInMask(volatileRegs);

  // The pure helper returns nullptr on allocation failure; the scratch float
  // register's failure label restores state before jumping to the IC failure.
  masm.branchPtr(Assembler::Equal, result, ImmPtr(nullptr),
                 scratchFloat.failure());
  return true;
}
  10180 
// Implement Number.prototype.toString(base) for int32 inputs via a VM call.
// Bases outside [2, 36] fail the IC so the VM fallback can throw RangeError.
bool CacheIRCompiler::emitInt32ToStringWithBaseResult(Int32OperandId inputId,
                                                      Int32OperandId baseId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  Register input = allocator.useRegister(masm, inputId);
  Register base = allocator.useRegister(masm, baseId);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Valid radix range per the spec is [2, 36].
  masm.branch32(Assembler::LessThan, base, Imm32(2), failure->label());
  masm.branch32(Assembler::GreaterThan, base, Imm32(36), failure->label());

  // Use lower-case characters by default.
  constexpr bool lowerCase = true;

  callvm.prepare();

  masm.Push(Imm32(lowerCase));
  masm.Push(base);
  masm.Push(input);

  using Fn = JSLinearString* (*)(JSContext*, int32_t, int32_t, bool);
  callvm.call<Fn, js::Int32ToStringWithBase<CanGC>>();
  return true;
}
  10215 
// Convert a boolean to its atom ("true" or "false") with a simple inline
// branch; no VM call or failure path is needed.
bool CacheIRCompiler::emitBooleanToString(BooleanOperandId inputId,
                                          StringOperandId resultId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  Register boolean = allocator.useRegister(masm, inputId);
  Register result = allocator.defineRegister(masm, resultId);
  const JSAtomState& names = cx_->names();
  Label true_, done;

  masm.branchTest32(Assembler::NonZero, boolean, boolean, &true_);

  // False case
  masm.movePtr(ImmGCPtr(names.false_), result);
  masm.jump(&done);

  // True case
  masm.bind(&true_);
  masm.movePtr(ImmGCPtr(names.true_), result);
  masm.bind(&done);

  return true;
}
  10237 
// Implement Object.prototype.toString for an object by calling the pure
// ObjectClassToString helper; fails the IC if the helper cannot produce a
// string without GC.
bool CacheIRCompiler::emitObjectToStringResult(ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(output.valueReg());
  volatileRegs.takeUnchecked(scratch);
  masm.PushRegsInMask(volatileRegs);

  using Fn = JSString* (*)(JSContext*, JSObject*);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::ObjectClassToString>();
  masm.storeCallPointerResult(scratch);

  masm.PopRegsInMask(volatileRegs);

  // nullptr result means the pure helper couldn't do this without GC.
  masm.branchPtr(Assembler::Equal, scratch, ImmPtr(nullptr), failure->label());
  masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());

  return true;
}
  10270 
// Concatenate two strings: fast path through the shared StringConcat trampoline
// stub (if this zone has compiled one), VM-call fallback to ConcatStrings
// otherwise or when the stub fails.
bool CacheIRCompiler::emitConcatStringsResult(StringOperandId lhsId,
                                              StringOperandId rhsId,
                                              uint32_t stubOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  const AutoOutputRegister& output = callvm.output();

  Register lhs = allocator.useRegister(masm, lhsId);
  Register rhs = allocator.useRegister(masm, rhsId);

  // Discard the stack to ensure it's balanced when we skip the vm-call.
  allocator.discardStack(masm);

  Label done;
  {
    // The StringConcat stub uses CallTemp registers 0 to 5.
    LiveGeneralRegisterSet liveRegs;
    liveRegs.add(CallTempReg0);
    liveRegs.add(CallTempReg1);
    liveRegs.add(CallTempReg2);
    liveRegs.add(CallTempReg3);
    liveRegs.add(CallTempReg4);
    liveRegs.add(CallTempReg5);
#ifdef JS_USE_LINK_REGISTER
    liveRegs.add(ICTailCallReg);
#endif
    liveRegs.takeUnchecked(output.valueReg());
    masm.PushRegsInMask(liveRegs);

    // The stub expects lhs in CallTempReg0 and rhs in CallTempReg1.
    masm.moveRegPair(lhs, rhs, CallTempReg0, CallTempReg1);

    uint32_t framePushed = masm.framePushed();

    // Call cx->zone()->jitZone()->stringConcatStub. See also the comment and
    // code in CallRegExpStub.
    Label vmCall;
    Register temp = CallTempReg2;
    masm.movePtr(ImmPtr(cx_->zone()->jitZone()), temp);
    masm.loadPtr(Address(temp, JitZone::offsetOfStringConcatStub()), temp);
    // A null stub pointer means the trampoline hasn't been compiled yet.
    masm.branchTestPtr(Assembler::Zero, temp, temp, &vmCall);
    masm.call(Address(temp, JitCode::offsetOfCode()));

    // The result is returned in CallTempReg5 (nullptr on failure).
    masm.branchTestPtr(Assembler::Zero, CallTempReg5, CallTempReg5, &vmCall);
    masm.tagValue(JSVAL_TYPE_STRING, CallTempReg5, output.valueReg());
    masm.PopRegsInMask(liveRegs);
    masm.jump(&done);

    // Slow path: restore the frame depth and registers, then do the VM call.
    masm.bind(&vmCall);
    masm.setFramePushed(framePushed);
    masm.PopRegsInMask(liveRegs);
  }

  {
    callvm.prepare();

    masm.Push(static_cast<js::jit::Imm32>(int32_t(js::gc::Heap::Default)));
    masm.Push(rhs);
    masm.Push(lhs);

    using Fn =
        JSString* (*)(JSContext*, HandleString, HandleString, js::gc::Heap);
    callvm.call<Fn, ConcatStrings<CanGC>>();
  }

  masm.bind(&done);
  return true;
}
  10341 
// Test whether a value is a suspended generator object, entirely inline: it
// must be an object of GeneratorObject class whose resumeIndex slot holds an
// int32 below RESUME_INDEX_RUNNING. Always produces a boolean; never fails.
bool CacheIRCompiler::emitCallIsSuspendedGeneratorResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);
  ValueOperand input = allocator.useValueRegister(masm, valId);

  // Test if it's an object.
  Label returnFalse, done;
  masm.fallibleUnboxObject(input, scratch, &returnFalse);

  // Test if it's a GeneratorObject.
  masm.branchTestObjClass(Assembler::NotEqual, scratch,
                          &GeneratorObject::class_, scratch2, scratch,
                          &returnFalse);

  // If the resumeIndex slot holds an int32 value < RESUME_INDEX_RUNNING,
  // the generator is suspended.
  Address addr(scratch, AbstractGeneratorObject::offsetOfResumeIndexSlot());
  masm.fallibleUnboxInt32(addr, scratch, &returnFalse);
  masm.branch32(Assembler::AboveOrEqual, scratch,
                Imm32(AbstractGeneratorObject::RESUME_INDEX_RUNNING),
                &returnFalse);

  masm.moveValue(BooleanValue(true), output.valueReg());
  masm.jump(&done);

  masm.bind(&returnFalse);
  masm.moveValue(BooleanValue(false), output.valueReg());

  masm.bind(&done);
  return true;
}
  10375 
// This op generates no code. It is consumed by the transpiler, which uses the
// shape stub field; at the IC level it is a trivially-succeeding no-op.
bool CacheIRCompiler::emitMetaScriptedThisShape(uint32_t) { return true; }
  10378 
// Get an element from a native object via a VM call to NativeGetElement,
// using the object itself (boxed as a Value) as the receiver.
bool CacheIRCompiler::emitCallNativeGetElementResult(ObjOperandId objId,
                                                     Int32OperandId indexId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);

  callvm.prepare();

  // Arguments are pushed in reverse order of the Fn signature; the receiver
  // is the object boxed into a Value.
  masm.Push(index);
  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callvm.call<Fn, NativeGetElement>();

  return true;
}
  10399 
// Like emitCallNativeGetElementResult, but with an explicit receiver Value
// (used for super[index] access).
bool CacheIRCompiler::emitCallNativeGetElementSuperResult(
    ObjOperandId objId, Int32OperandId indexId, ValOperandId receiverId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  ValueOperand receiver = allocator.useValueRegister(masm, receiverId);

  callvm.prepare();

  // Arguments are pushed in reverse order of the Fn signature.
  masm.Push(index);
  masm.Push(receiver);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, Handle<NativeObject*>, HandleValue, int32_t,
                      MutableHandleValue);
  callvm.call<Fn, NativeGetElement>();

  return true;
}
  10421 
// Implement |id in proxy| (hasOwn=false) or Object.hasOwn-style checks
// (hasOwn=true) via a VM call into the proxy handler.
bool CacheIRCompiler::emitProxyHasPropResult(ObjOperandId objId,
                                             ValOperandId idId, bool hasOwn) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoCallVM callvm(masm, this, allocator);

  Register obj = allocator.useRegister(masm, objId);
  ValueOperand idVal = allocator.useValueRegister(masm, idId);

  callvm.prepare();

  masm.Push(idVal);
  masm.Push(obj);

  using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
  // Both helpers share the signature; |hasOwn| picks the trap at compile time.
  if (hasOwn) {
    callvm.call<Fn, ProxyHasOwn>();
  } else {
    callvm.call<Fn, ProxyHas>();
  }
  return true;
}
  10443 
  10444 bool CacheIRCompiler::emitProxyGetByValueResult(ObjOperandId objId,
  10445                                                ValOperandId idId) {
  10446  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10447  AutoCallVM callvm(masm, this, allocator);
  10448 
  10449  Register obj = allocator.useRegister(masm, objId);
  10450  ValueOperand idVal = allocator.useValueRegister(masm, idId);
  10451 
  10452  callvm.prepare();
  10453  masm.Push(idVal);
  10454  masm.Push(obj);
  10455 
  10456  using Fn =
  10457      bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
  10458  callvm.call<Fn, ProxyGetPropertyByValue>();
  10459  return true;
  10460 }
  10461 
  10462 bool CacheIRCompiler::emitCallGetSparseElementResult(ObjOperandId objId,
  10463                                                     Int32OperandId indexId) {
  10464  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10465 
  10466  AutoCallVM callvm(masm, this, allocator);
  10467 
  10468  Register obj = allocator.useRegister(masm, objId);
  10469  Register id = allocator.useRegister(masm, indexId);
  10470 
  10471  callvm.prepare();
  10472  masm.Push(id);
  10473  masm.Push(obj);
  10474 
  10475  using Fn = bool (*)(JSContext* cx, Handle<NativeObject*> obj, int32_t int_id,
  10476                      MutableHandleValue result);
  10477  callvm.call<Fn, GetSparseElementHelper>();
  10478  return true;
  10479 }
  10480 
// Loads and clears the regexp searcher's lastLimit (see
// MacroAssembler::loadAndClearRegExpSearcherLastLimit) and boxes it as an
// Int32 Value.
bool CacheIRCompiler::emitRegExpSearcherLastLimitResult() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  // scratch1 receives the limit; scratch2 is a temp for the load/clear.
  masm.loadAndClearRegExpSearcherLastLimit(scratch1, scratch2);

  masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
  return true;
}
  10493 
  10494 bool CacheIRCompiler::emitRegExpFlagResult(ObjOperandId regexpId,
  10495                                           int32_t flagsMask) {
  10496  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10497 
  10498  AutoOutputRegister output(*this);
  10499  Register regexp = allocator.useRegister(masm, regexpId);
  10500  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  10501 
  10502  Address flagsAddr(
  10503      regexp, NativeObject::getFixedSlotOffset(RegExpObject::flagsSlot()));
  10504  masm.unboxInt32(flagsAddr, scratch);
  10505 
  10506  Label ifFalse, done;
  10507  masm.branchTest32(Assembler::Zero, scratch, Imm32(flagsMask), &ifFalse);
  10508  masm.moveValue(BooleanValue(true), output.valueReg());
  10509  masm.jump(&done);
  10510 
  10511  masm.bind(&ifFalse);
  10512  masm.moveValue(BooleanValue(false), output.valueReg());
  10513 
  10514  masm.bind(&done);
  10515  return true;
  10516 }
  10517 
  10518 bool CacheIRCompiler::emitCallSubstringKernelResult(StringOperandId strId,
  10519                                                    Int32OperandId beginId,
  10520                                                    Int32OperandId lengthId) {
  10521  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10522 
  10523  AutoCallVM callvm(masm, this, allocator);
  10524 
  10525  Register str = allocator.useRegister(masm, strId);
  10526  Register begin = allocator.useRegister(masm, beginId);
  10527  Register length = allocator.useRegister(masm, lengthId);
  10528 
  10529  callvm.prepare();
  10530  masm.Push(length);
  10531  masm.Push(begin);
  10532  masm.Push(str);
  10533 
  10534  using Fn = JSString* (*)(JSContext * cx, HandleString str, int32_t begin,
  10535                           int32_t len);
  10536  callvm.call<Fn, SubstringKernel>();
  10537  return true;
  10538 }
  10539 
  10540 bool CacheIRCompiler::emitStringReplaceStringResult(
  10541    StringOperandId strId, StringOperandId patternId,
  10542    StringOperandId replacementId) {
  10543  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10544 
  10545  AutoCallVM callvm(masm, this, allocator);
  10546 
  10547  Register str = allocator.useRegister(masm, strId);
  10548  Register pattern = allocator.useRegister(masm, patternId);
  10549  Register replacement = allocator.useRegister(masm, replacementId);
  10550 
  10551  callvm.prepare();
  10552  masm.Push(replacement);
  10553  masm.Push(pattern);
  10554  masm.Push(str);
  10555 
  10556  using Fn =
  10557      JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
  10558  callvm.call<Fn, jit::StringReplace>();
  10559  return true;
  10560 }
  10561 
  10562 bool CacheIRCompiler::emitStringSplitStringResult(StringOperandId strId,
  10563                                                  StringOperandId separatorId) {
  10564  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10565 
  10566  AutoCallVM callvm(masm, this, allocator);
  10567 
  10568  Register str = allocator.useRegister(masm, strId);
  10569  Register separator = allocator.useRegister(masm, separatorId);
  10570 
  10571  callvm.prepare();
  10572  masm.Push(Imm32(INT32_MAX));
  10573  masm.Push(separator);
  10574  masm.Push(str);
  10575 
  10576  using Fn = ArrayObject* (*)(JSContext*, HandleString, HandleString, uint32_t);
  10577  callvm.call<Fn, js::StringSplitString>();
  10578  return true;
  10579 }
  10580 
  10581 bool CacheIRCompiler::emitGetFirstDollarIndexResult(StringOperandId strId) {
  10582  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10583 
  10584  AutoCallVM callvm(masm, this, allocator);
  10585 
  10586  Register str = allocator.useRegister(masm, strId);
  10587 
  10588  callvm.prepare();
  10589  masm.Push(str);
  10590 
  10591  using Fn = bool (*)(JSContext*, JSString*, int32_t*);
  10592  callvm.call<Fn, GetFirstDollarIndexRaw>();
  10593  return true;
  10594 }
  10595 
// Atomics.compareExchange on a typed array. Int32-sized element types go
// through an ABI call and box the old value inline; BigInt element types go
// through a VM call. Only one of |output| / |callvm| is initialized,
// depending on which path is taken.
bool CacheIRCompiler::emitAtomicsCompareExchangeResult(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t expectedId,
    uint32_t replacementId, Scalar::Type elementType,
    ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (!Scalar::isBigIntType(elementType)) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
#ifdef JS_CODEGEN_X86
  // Use a scratch register to avoid running out of registers.
  Register obj = output ? output->valueReg().typeReg()
                        : callvm->outputValueReg().typeReg();
  allocator.copyToScratchRegister(masm, objId, obj);
#else
  Register obj = allocator.useRegister(masm, objId);
#endif
  Register index = allocator.useRegister(masm, indexId);
  // |expectedId|/|replacementId| are raw operand ids; their operand kind
  // (Int32 vs. BigInt) depends on the element type.
  Register expected;
  Register replacement;
  if (!Scalar::isBigIntType(elementType)) {
    expected = allocator.useRegister(masm, Int32OperandId(expectedId));
    replacement = allocator.useRegister(masm, Int32OperandId(replacementId));
  } else {
    expected = allocator.useRegister(masm, BigIntOperandId(expectedId));
    replacement = allocator.useRegister(masm, BigIntOperandId(replacementId));
  }

  // On x86, |obj| aliases the output's typeReg (see above), so the scratch
  // must be a different payload register.
  Register scratch = output ? output->valueReg().scratchReg()
                            : callvm->outputValueReg().scratchReg();
  MOZ_ASSERT(scratch != obj, "scratchReg must not be typeReg");

  Maybe<AutoScratchRegister> scratch2;
  if (viewKind == ArrayBufferViewKind::Resizable) {
#ifdef JS_CODEGEN_X86
    // Not enough spare registers on x86.
#else
    scratch2.emplace(allocator, masm);
#endif
  }

  // Not enough registers on X86.
  constexpr auto spectreTemp = mozilla::Nothing{};

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
                            spectreTemp, failure->label());

  // Atomic operations are highly platform-dependent, for example x86/x64 has
  // specific requirements on which registers are used; MIPS needs multiple
  // additional temporaries. Therefore we're using either an ABI or VM call here
  // instead of handling each platform separately.

  if (Scalar::isBigIntType(elementType)) {
    callvm->prepare();

    masm.Push(replacement);
    masm.Push(expected);
    masm.Push(index);
    masm.Push(obj);

    using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t, const BigInt*,
                           const BigInt*);
    callvm->call<Fn, jit::AtomicsCompareExchange64>();
    return true;
  }

  {
    // Preserve all volatile registers across the ABI call except the ones we
    // write the result into.
    LiveRegisterSet volatileRegs = liveVolatileRegs();
    volatileRegs.takeUnchecked(output->valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(expected);
    masm.passABIArg(replacement);
    masm.callWithABI(DynamicFunction<AtomicsCompareExchangeFn>(
        AtomicsCompareExchange(elementType)));
    masm.storeCallInt32Result(scratch);

    masm.PopRegsInMask(volatileRegs);
  }

  // Uint32 results can exceed INT32_MAX, so box them as doubles.
  if (elementType != Scalar::Uint32) {
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output->valueReg());
  } else {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output->valueReg(), fpscratch);
  }

  return true;
}
  10705 
// Shared tail for int32-sized Atomics read-modify-write ops (add, sub, and,
// or, xor, exchange): bounds-checks, performs the operation through an ABI
// call to |fn|, and boxes the call's int32 result.
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
    Scalar::Type elementType, ArrayBufferViewKind viewKind,
    AtomicsReadWriteModifyFn fn) {
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  // |valueId| is always an Int32 operand here; BigInt element types take the
  // path in emitAtomicsReadModifyWriteResult64 instead.
  Register value = allocator.useRegister(masm, Int32OperandId(valueId));
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
  if (viewKind == ArrayBufferViewKind::Resizable) {
    // Extra temp used by the resizable-view bounds check.
    scratch2.emplace(allocator, masm, output);
  }

  // Not enough registers on X86.
  constexpr auto spectreTemp = mozilla::Nothing{};

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
                            spectreTemp, failure->label());

  // See comment in emitAtomicsCompareExchange for why we use an ABI call.
  {
    // Preserve volatile registers except those receiving the result.
    LiveRegisterSet volatileRegs = liveVolatileRegs();
    volatileRegs.takeUnchecked(output.valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(value);
    masm.callWithABI(DynamicFunction<AtomicsReadWriteModifyFn>(fn));
    masm.storeCallInt32Result(scratch);

    masm.PopRegsInMask(volatileRegs);
  }

  // Uint32 results can exceed INT32_MAX, so box them as doubles.
  if (elementType != Scalar::Uint32) {
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  } else {
    ScratchDoubleScope fpscratch(masm);
    masm.convertUInt32ToDouble(scratch, fpscratch);
    masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
  }

  return true;
}
  10759 
// Shared tail for the 64-bit (BigInt element) Atomics read-modify-write ops:
// bounds-checks, then performs the operation through a VM call to |fn|.
template <CacheIRCompiler::AtomicsReadWriteModify64Fn fn>
bool CacheIRCompiler::emitAtomicsReadModifyWriteResult64(
    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
    ArrayBufferViewKind viewKind) {
  AutoCallVM callvm(masm, this, allocator);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  // |valueId| is a BigInt operand on this path.
  Register value = allocator.useRegister(masm, BigIntOperandId(valueId));
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());
  Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
  if (viewKind == ArrayBufferViewKind::Resizable) {
    // Extra temp used by the resizable-view bounds check.
    scratch2.emplace(allocator, masm, callvm.output());
  }

  // Not enough registers on X86.
  constexpr auto spectreTemp = mozilla::Nothing{};

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
                            spectreTemp, failure->label());

  // See comment in emitAtomicsCompareExchange for why we use a VM call.

  callvm.prepare();

  // Arguments are pushed in reverse order.
  masm.Push(value);
  masm.Push(index);
  masm.Push(obj);

  callvm.call<AtomicsReadWriteModify64Fn, fn>();
  return true;
}
  10802 
  10803 bool CacheIRCompiler::emitAtomicsExchangeResult(ObjOperandId objId,
  10804                                                IntPtrOperandId indexId,
  10805                                                uint32_t valueId,
  10806                                                Scalar::Type elementType,
  10807                                                ArrayBufferViewKind viewKind) {
  10808  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10809 
  10810  if (Scalar::isBigIntType(elementType)) {
  10811    return emitAtomicsReadModifyWriteResult64<jit::AtomicsExchange64>(
  10812        objId, indexId, valueId, viewKind);
  10813  }
  10814  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
  10815                                          viewKind,
  10816                                          AtomicsExchange(elementType));
  10817 }
  10818 
  10819 bool CacheIRCompiler::emitAtomicsAddResult(
  10820    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
  10821    Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
  10822  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10823 
  10824  if (Scalar::isBigIntType(elementType)) {
  10825    return emitAtomicsReadModifyWriteResult64<jit::AtomicsAdd64>(
  10826        objId, indexId, valueId, viewKind);
  10827  }
  10828  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
  10829                                          viewKind, AtomicsAdd(elementType));
  10830 }
  10831 
  10832 bool CacheIRCompiler::emitAtomicsSubResult(
  10833    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
  10834    Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
  10835  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10836 
  10837  if (Scalar::isBigIntType(elementType)) {
  10838    return emitAtomicsReadModifyWriteResult64<jit::AtomicsSub64>(
  10839        objId, indexId, valueId, viewKind);
  10840  }
  10841  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
  10842                                          viewKind, AtomicsSub(elementType));
  10843 }
  10844 
  10845 bool CacheIRCompiler::emitAtomicsAndResult(
  10846    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
  10847    Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
  10848  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10849 
  10850  if (Scalar::isBigIntType(elementType)) {
  10851    return emitAtomicsReadModifyWriteResult64<jit::AtomicsAnd64>(
  10852        objId, indexId, valueId, viewKind);
  10853  }
  10854  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
  10855                                          viewKind, AtomicsAnd(elementType));
  10856 }
  10857 
  10858 bool CacheIRCompiler::emitAtomicsOrResult(
  10859    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
  10860    Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
  10861  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10862 
  10863  if (Scalar::isBigIntType(elementType)) {
  10864    return emitAtomicsReadModifyWriteResult64<jit::AtomicsOr64>(
  10865        objId, indexId, valueId, viewKind);
  10866  }
  10867  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
  10868                                          viewKind, AtomicsOr(elementType));
  10869 }
  10870 
  10871 bool CacheIRCompiler::emitAtomicsXorResult(
  10872    ObjOperandId objId, IntPtrOperandId indexId, uint32_t valueId,
  10873    Scalar::Type elementType, bool forEffect, ArrayBufferViewKind viewKind) {
  10874  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  10875 
  10876  if (Scalar::isBigIntType(elementType)) {
  10877    return emitAtomicsReadModifyWriteResult64<jit::AtomicsXor64>(
  10878        objId, indexId, valueId, viewKind);
  10879  }
  10880  return emitAtomicsReadModifyWriteResult(objId, indexId, valueId, elementType,
  10881                                          viewKind, AtomicsXor(elementType));
  10882 }
  10883 
// Atomics.load on a typed array. Int32-sized elements are loaded inline
// between memory barriers; BigInt elements use a VM call. Only one of
// |output| / |callvm| is initialized, depending on the path taken.
bool CacheIRCompiler::emitAtomicsLoadResult(ObjOperandId objId,
                                            IntPtrOperandId indexId,
                                            Scalar::Type elementType,
                                            ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Maybe<AutoOutputRegister> output;
  Maybe<AutoCallVM> callvm;
  if (!Scalar::isBigIntType(elementType)) {
    output.emplace(*this);
  } else {
    callvm.emplace(masm, this, allocator);
  }
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm,
                                         output ? *output : callvm->output());
  // Fixed-length/immutable views get a Spectre temp; resizable views instead
  // need a second scratch for the bounds check.
  Maybe<AutoSpectreBoundsScratchRegister> spectreTemp;
  Maybe<AutoScratchRegister> scratch2;
  if (viewKind == ArrayBufferViewKind::FixedLength ||
      viewKind == ArrayBufferViewKind::Immutable) {
    spectreTemp.emplace(allocator, masm);
  } else {
    scratch2.emplace(allocator, masm);
  }
  AutoAvailableFloatRegister floatReg(*this, FloatReg0);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // AutoCallVM's AutoSaveLiveRegisters aren't accounted for in FailurePath, so
  // we can't use both at the same time. This isn't an issue here, because Ion
  // doesn't support CallICs. If that ever changes, this code must be updated.
  MOZ_ASSERT(isBaseline(), "Can't use FailurePath with AutoCallVM in Ion ICs");

  // Bounds check.
  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
                            spectreTemp, failure->label());

  // Atomic operations are highly platform-dependent, for example x86/arm32 has
  // specific requirements on which registers are used. Therefore we're using a
  // VM call here instead of handling each platform separately.
  if (Scalar::isBigIntType(elementType)) {
    callvm->prepare();

    masm.Push(index);
    masm.Push(obj);

    using Fn = BigInt* (*)(JSContext*, TypedArrayObject*, size_t);
    callvm->call<Fn, jit::AtomicsLoad64>();
    return true;
  }

  // Load the elements vector.
  masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);

  // Load the value.
  BaseIndex source(scratch, index, ScaleFromScalarType(elementType));

  // NOTE: the generated code must match the assembly code in gen_load in
  // GenerateAtomicOperations.py
  auto sync = Synchronization::Load();

  masm.memoryBarrierBefore(sync);

  // Uint32 values are force-boxed as doubles, so the uint32 failure path is
  // never taken and no failure label is needed.
  Label* failUint32 = nullptr;
  MacroAssembler::Uint32Mode mode = MacroAssembler::Uint32Mode::ForceDouble;
  masm.loadFromTypedArray(elementType, source, output->valueReg(), mode,
                          InvalidReg, failUint32, LiveRegisterSet{});
  masm.memoryBarrierAfter(sync);

  return true;
}
  10959 
// Atomics.store on a typed array. Int32-sized elements are stored inline
// between memory barriers; BigInt elements go through an ABI call. The
// result Value is the stored operand, re-boxed.
bool CacheIRCompiler::emitAtomicsStoreResult(ObjOperandId objId,
                                             IntPtrOperandId indexId,
                                             uint32_t valueId,
                                             Scalar::Type elementType,
                                             ArrayBufferViewKind viewKind) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, objId);
  Register index = allocator.useRegister(masm, indexId);
  // |valueId|'s operand kind (Int32 vs. BigInt) depends on the element type.
  Maybe<Register> valueInt32;
  Maybe<Register> valueBigInt;
  if (!Scalar::isBigIntType(elementType)) {
    valueInt32.emplace(allocator.useRegister(masm, Int32OperandId(valueId)));
  } else {
    valueBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(valueId)));
  }
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  Maybe<AutoScratchRegisterMaybeOutputType> scratch2;
  if (viewKind == ArrayBufferViewKind::Resizable) {
    // Extra temp used by the resizable-view bounds check.
    scratch2.emplace(allocator, masm, output);
  }

  // Not enough registers on X86.
  constexpr auto spectreTemp = mozilla::Nothing{};

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Bounds check.
  emitTypedArrayBoundsCheck(viewKind, obj, index, scratch, scratch2,
                            spectreTemp, failure->label());

  if (!Scalar::isBigIntType(elementType)) {
    // Load the elements vector.
    masm.loadPtr(Address(obj, ArrayBufferViewObject::dataOffset()), scratch);

    // Store the value.
    BaseIndex dest(scratch, index, ScaleFromScalarType(elementType));

    // NOTE: the generated code must match the assembly code in gen_store in
    // GenerateAtomicOperations.py
    auto sync = Synchronization::Store();

    masm.memoryBarrierBefore(sync);
    masm.storeToTypedIntArray(elementType, *valueInt32, dest);
    masm.memoryBarrierAfter(sync);

    masm.tagValue(JSVAL_TYPE_INT32, *valueInt32, output.valueReg());
  } else {
    // See comment in emitAtomicsCompareExchange for why we use an ABI call.

    // Preserve volatile registers except those receiving the result.
    LiveRegisterSet volatileRegs = liveVolatileRegs();
    volatileRegs.takeUnchecked(output.valueReg());
    volatileRegs.takeUnchecked(scratch);
    masm.PushRegsInMask(volatileRegs);

    using Fn = void (*)(TypedArrayObject*, size_t, const BigInt*);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(obj);
    masm.passABIArg(index);
    masm.passABIArg(*valueBigInt);
    masm.callWithABI<Fn, jit::AtomicsStore64>();

    masm.PopRegsInMask(volatileRegs);

    masm.tagValue(JSVAL_TYPE_BIGINT, *valueBigInt, output.valueReg());
  }

  return true;
}
  11033 
  11034 bool CacheIRCompiler::emitAtomicsIsLockFreeResult(Int32OperandId valueId) {
  11035  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11036 
  11037  AutoOutputRegister output(*this);
  11038  Register value = allocator.useRegister(masm, valueId);
  11039  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  11040 
  11041  masm.atomicIsLockFreeJS(value, scratch);
  11042  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  11043 
  11044  return true;
  11045 }
  11046 
  11047 bool CacheIRCompiler::emitAtomicsPauseResult() {
  11048  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11049 
  11050  AutoOutputRegister output(*this);
  11051 
  11052  masm.atomicPause();
  11053  masm.moveValue(UndefinedValue(), output.valueReg());
  11054 
  11055  return true;
  11056 }
  11057 
  11058 bool CacheIRCompiler::emitBigIntAsIntNResult(Int32OperandId bitsId,
  11059                                             BigIntOperandId bigIntId) {
  11060  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11061 
  11062  AutoCallVM callvm(masm, this, allocator);
  11063 
  11064  Register bits = allocator.useRegister(masm, bitsId);
  11065  Register bigInt = allocator.useRegister(masm, bigIntId);
  11066 
  11067  callvm.prepare();
  11068  masm.Push(bits);
  11069  masm.Push(bigInt);
  11070 
  11071  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  11072  callvm.call<Fn, jit::BigIntAsIntN>();
  11073  return true;
  11074 }
  11075 
  11076 bool CacheIRCompiler::emitBigIntAsUintNResult(Int32OperandId bitsId,
  11077                                              BigIntOperandId bigIntId) {
  11078  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11079 
  11080  AutoCallVM callvm(masm, this, allocator);
  11081 
  11082  Register bits = allocator.useRegister(masm, bitsId);
  11083  Register bigInt = allocator.useRegister(masm, bigIntId);
  11084 
  11085  callvm.prepare();
  11086  masm.Push(bits);
  11087  masm.Push(bigInt);
  11088 
  11089  using Fn = BigInt* (*)(JSContext*, HandleBigInt, int32_t);
  11090  callvm.call<Fn, jit::BigIntAsUintN>();
  11091  return true;
  11092 }
  11093 
  11094 bool CacheIRCompiler::emitSetHasResult(ObjOperandId setId, ValOperandId valId) {
  11095  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11096 
  11097  AutoCallVM callvm(masm, this, allocator);
  11098 
  11099  Register set = allocator.useRegister(masm, setId);
  11100  ValueOperand val = allocator.useValueRegister(masm, valId);
  11101 
  11102  callvm.prepare();
  11103  masm.Push(val);
  11104  masm.Push(set);
  11105 
  11106  using Fn = bool (*)(JSContext*, Handle<SetObject*>, HandleValue, bool*);
  11107  callvm.call<Fn, jit::SetObjectHas>();
  11108  return true;
  11109 }
  11110 
// Inline fast path for Set.prototype.has when the key is known to be a
// non-GC-thing value: hash it and probe the set's table without a VM call.
bool CacheIRCompiler::emitSetHasNonGCThingResult(ObjOperandId setId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  // Canonicalize the value into output.valueReg() (used as a temp here),
  // then compute its hash.
  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  // scratch2 receives the boolean lookup result, which is then boxed.
  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
  11133 
// Inline fast path for Set.prototype.has with a symbol key: hash the symbol
// and probe the set's table without a VM call.
bool CacheIRCompiler::emitSetHasSymbolResult(ObjOperandId setId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  // Hash the symbol into scratch1.
  masm.prepareHashSymbol(sym, scratch1);

  // Box the symbol as the lookup key; scratch2 receives the boolean result.
  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
  11155 
// Inline fast path for Set.prototype.has with a BigInt key. BigInt lookup
// needs six scratch registers; on ARM32 there aren't enough, so |set| is
// saved on the stack, reused as the sixth scratch, and restored afterwards.
bool CacheIRCompiler::emitSetHasBigIntResult(ObjOperandId setId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  Register scratch6 = set;

  masm.push(set);
#endif

  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  // Box the BigInt as the lookup key; scratch2 receives the boolean result.
  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.setObjectHasBigInt(set, output.valueReg(), scratch1, scratch2, scratch3,
                          scratch4, scratch5, scratch6);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());

#ifdef JS_CODEGEN_ARM
  // Restore |set|, clobbered above when used as scratch6.
  masm.pop(set);
#endif
  return true;
}
  11190 
// Inline fast path for Set.prototype.has with an object key: hash the object
// and probe the set's table without a VM call.
bool CacheIRCompiler::emitSetHasObjectResult(ObjOperandId setId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register set = allocator.useRegister(masm, setId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  // Box the object as the lookup key, then hash it into scratch1.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(set, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  // scratch2 receives the boolean lookup result, which is then boxed.
  masm.setObjectHasNonBigInt(set, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
  11214 
  11215 bool CacheIRCompiler::emitSetDeleteResult(ObjOperandId setId,
  11216                                          ValOperandId valId) {
  11217  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11218 
  11219  AutoCallVM callvm(masm, this, allocator);
  11220 
  11221  Register set = allocator.useRegister(masm, setId);
  11222  ValueOperand val = allocator.useValueRegister(masm, valId);
  11223 
  11224  callvm.prepare();
  11225  masm.Push(val);
  11226  masm.Push(set);
  11227 
  11228  using Fn = bool (*)(JSContext*, Handle<SetObject*>, HandleValue, bool*);
  11229  callvm.call<Fn, jit::SetObjectDelete>();
  11230  return true;
  11231 }
  11232 
  11233 bool CacheIRCompiler::emitSetAddResult(ObjOperandId setId, ValOperandId keyId) {
  11234  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11235 
  11236  AutoCallVM callvm(masm, this, allocator);
  11237 
  11238  Register set = allocator.useRegister(masm, setId);
  11239  ValueOperand key = allocator.useValueRegister(masm, keyId);
  11240 
  11241  callvm.prepare();
  11242  masm.Push(key);
  11243  masm.Push(set);
  11244 
  11245  using Fn =
  11246      bool (*)(JSContext*, Handle<SetObject*>, HandleValue, MutableHandleValue);
  11247  callvm.call<Fn, jit::SetObjectAddFromIC>();
  11248  return true;
  11249 }
  11250 
  11251 bool CacheIRCompiler::emitSetSizeResult(ObjOperandId setId) {
  11252  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11253 
  11254  AutoOutputRegister output(*this);
  11255  Register set = allocator.useRegister(masm, setId);
  11256  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  11257 
  11258  masm.loadSetObjectSize(set, scratch);
  11259  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  11260  return true;
  11261 }
  11262 
  11263 bool CacheIRCompiler::emitMapHasResult(ObjOperandId mapId, ValOperandId valId) {
  11264  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11265 
  11266  AutoCallVM callvm(masm, this, allocator);
  11267 
  11268  Register map = allocator.useRegister(masm, mapId);
  11269  ValueOperand val = allocator.useValueRegister(masm, valId);
  11270 
  11271  callvm.prepare();
  11272  masm.Push(val);
  11273  masm.Push(map);
  11274 
  11275  using Fn = bool (*)(JSContext*, Handle<MapObject*>, HandleValue, bool*);
  11276  callvm.call<Fn, jit::MapObjectHas>();
  11277  return true;
  11278 }
  11279 
// Inline fast path for Map.prototype.has with a key that is not a GC thing:
// canonicalize the value to its hashable form, hash it, and probe the map's
// hash table without a VM call.
bool CacheIRCompiler::emitMapHasNonGCThingResult(ObjOperandId mapId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  // Normalize |val| into output.valueReg() (float scratch used for number
  // canonicalization), then hash the normalized Value.
  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  // The lookup leaves its boolean result in scratch2; tag it as the output.
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
  11302 
// Inline fast path for Map.prototype.has with a Symbol key: hash the symbol
// and probe the map's hash table without a VM call.
bool CacheIRCompiler::emitMapHasSymbolResult(ObjOperandId mapId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  // Box the symbol; the lookup compares boxed Values.
  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  // The lookup leaves its boolean result in scratch2; tag it as the output.
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
  11324 
// Inline fast path for Map.prototype.has with a BigInt key: hash the BigInt
// inline and probe the map's hash table without a VM call.
bool CacheIRCompiler::emitMapHasBigIntResult(ObjOperandId mapId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  // Reuse |map| as the sixth scratch; its value is spilled here and
  // restored after the lookup below.
  Register scratch6 = map;

  masm.push(map);
#endif

  // Compute the BigInt's hash, using scratch1-scratch4 as temporaries.
  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  // Box the BigInt so the table lookup can compare boxed Values.
  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.mapObjectHasBigInt(map, output.valueReg(), scratch1, scratch2, scratch3,
                          scratch4, scratch5, scratch6);
  // The lookup leaves its boolean result in scratch2; tag it as the output.
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());

#ifdef JS_CODEGEN_ARM
  masm.pop(map);
#endif
  return true;
}
  11359 
// Inline fast path for Map.prototype.has with an object key: compute the
// object's hash and probe the map's hash table without a VM call.
bool CacheIRCompiler::emitMapHasObjectResult(ObjOperandId mapId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  // Box the object; both hashing and lookup operate on the boxed Value.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  masm.mapObjectHasNonBigInt(map, output.valueReg(), scratch1, scratch2,
                             scratch3, scratch4);
  // The lookup leaves its boolean result in scratch2; tag it as the output.
  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
  return true;
}
  11383 
  11384 bool CacheIRCompiler::emitMapGetResult(ObjOperandId mapId, ValOperandId valId) {
  11385  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11386 
  11387  AutoCallVM callvm(masm, this, allocator);
  11388 
  11389  Register map = allocator.useRegister(masm, mapId);
  11390  ValueOperand val = allocator.useValueRegister(masm, valId);
  11391 
  11392  callvm.prepare();
  11393  masm.Push(val);
  11394  masm.Push(map);
  11395 
  11396  using Fn =
  11397      bool (*)(JSContext*, Handle<MapObject*>, HandleValue, MutableHandleValue);
  11398  callvm.call<Fn, jit::MapObjectGet>();
  11399  return true;
  11400 }
  11401 
  11402 bool CacheIRCompiler::emitMapDeleteResult(ObjOperandId mapId,
  11403                                          ValOperandId valId) {
  11404  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11405 
  11406  AutoCallVM callvm(masm, this, allocator);
  11407 
  11408  Register map = allocator.useRegister(masm, mapId);
  11409  ValueOperand val = allocator.useValueRegister(masm, valId);
  11410 
  11411  callvm.prepare();
  11412  masm.Push(val);
  11413  masm.Push(map);
  11414 
  11415  using Fn = bool (*)(JSContext*, Handle<MapObject*>, HandleValue, bool*);
  11416  callvm.call<Fn, jit::MapObjectDelete>();
  11417  return true;
  11418 }
  11419 
  11420 bool CacheIRCompiler::emitMapSetResult(ObjOperandId mapId, ValOperandId keyId,
  11421                                       ValOperandId valId) {
  11422  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11423 
  11424  AutoCallVM callvm(masm, this, allocator);
  11425 
  11426  Register map = allocator.useRegister(masm, mapId);
  11427  ValueOperand key = allocator.useValueRegister(masm, keyId);
  11428  ValueOperand val = allocator.useValueRegister(masm, valId);
  11429 
  11430  callvm.prepare();
  11431  masm.Push(val);
  11432  masm.Push(key);
  11433  masm.Push(map);
  11434 
  11435  using Fn = bool (*)(JSContext*, Handle<MapObject*>, HandleValue, HandleValue,
  11436                      MutableHandleValue);
  11437  callvm.call<Fn, jit::MapObjectSetFromIC>();
  11438  return true;
  11439 }
  11440 
// Inline fast path for Map.prototype.get with a key that is not a GC thing:
// canonicalize and hash the key, then look it up in the map's hash table
// without a VM call.
bool CacheIRCompiler::emitMapGetNonGCThingResult(ObjOperandId mapId,
                                                 ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  ValueOperand val = allocator.useValueRegister(masm, valId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);

  // Normalize |val| into output.valueReg(), then hash the normalized Value.
  masm.toHashableNonGCThing(val, output.valueReg(), scratchFloat);
  masm.prepareHashNonGCThing(output.valueReg(), scratch1, scratch2);

  // Key and result share output.valueReg(): the lookup overwrites the boxed
  // key with the Value it finds.
  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}
  11462 
// Inline fast path for Map.prototype.get with a Symbol key: hash the symbol
// and look it up in the map's hash table without a VM call.
bool CacheIRCompiler::emitMapGetSymbolResult(ObjOperandId mapId,
                                             SymbolOperandId symId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register sym = allocator.useRegister(masm, symId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);

  masm.prepareHashSymbol(sym, scratch1);

  // Box the symbol; key and result share output.valueReg(), so the lookup
  // overwrites the boxed key with the Value it finds.
  masm.tagValue(JSVAL_TYPE_SYMBOL, sym, output.valueReg());
  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}
  11483 
// Inline fast path for Map.prototype.get with a BigInt key: hash the BigInt
// inline and look it up in the map's hash table without a VM call.
bool CacheIRCompiler::emitMapGetBigIntResult(ObjOperandId mapId,
                                             BigIntOperandId bigIntId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register bigInt = allocator.useRegister(masm, bigIntId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);
#ifndef JS_CODEGEN_ARM
  AutoScratchRegister scratch6(allocator, masm);
#else
  // We don't have more registers available on ARM32.
  // Reuse |map| as the sixth scratch; its value is spilled here and
  // restored after the lookup below.
  Register scratch6 = map;

  masm.push(map);
#endif

  // Compute the BigInt's hash, using scratch1-scratch4 as temporaries.
  masm.prepareHashBigInt(bigInt, scratch1, scratch2, scratch3, scratch4);

  // Box the BigInt; key and result share output.valueReg(), so the lookup
  // overwrites the boxed key with the Value it finds.
  masm.tagValue(JSVAL_TYPE_BIGINT, bigInt, output.valueReg());
  masm.mapObjectGetBigInt(map, output.valueReg(), scratch1, output.valueReg(),
                          scratch2, scratch3, scratch4, scratch5, scratch6);

#ifdef JS_CODEGEN_ARM
  masm.pop(map);
#endif
  return true;
}
  11517 
// Inline fast path for Map.prototype.get with an object key: compute the
// object's hash and look it up in the map's hash table without a VM call.
bool CacheIRCompiler::emitMapGetObjectResult(ObjOperandId mapId,
                                             ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register map = allocator.useRegister(masm, mapId);
  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);
  AutoScratchRegister scratch3(allocator, masm);
  AutoScratchRegister scratch4(allocator, masm);
  AutoScratchRegister scratch5(allocator, masm);

  // Box the object; both hashing and lookup operate on the boxed Value.
  masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
  masm.prepareHashObject(map, output.valueReg(), scratch1, scratch2, scratch3,
                         scratch4, scratch5);

  // Key and result share output.valueReg(): the lookup overwrites the boxed
  // key with the Value it finds.
  masm.mapObjectGetNonBigInt(map, output.valueReg(), scratch1,
                             output.valueReg(), scratch2, scratch3, scratch4);
  return true;
}
  11540 
  11541 bool CacheIRCompiler::emitMapSizeResult(ObjOperandId mapId) {
  11542  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11543 
  11544  AutoOutputRegister output(*this);
  11545  Register map = allocator.useRegister(masm, mapId);
  11546  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  11547 
  11548  masm.loadMapObjectSize(map, scratch);
  11549  masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
  11550  return true;
  11551 }
  11552 
// Fetch the Value mapped to |objId| in a WeakMap via an ABI call to
// WeakMapObject::getObject. The callee writes its result through a Value*
// out-param pointing at a stack slot reserved below, which is then popped
// into the output register.
bool CacheIRCompiler::emitWeakMapGetObjectResult(ObjOperandId weakMapId,
                                                 ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register weakMap = allocator.useRegister(masm, weakMapId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  AutoScratchRegister scratch2(allocator, masm);

  // The result Value will be stored on the stack.
  masm.reserveStack(sizeof(Value));
  masm.moveStackPtrTo(scratch1.get());

  // Save volatile registers across the call, except the two scratches:
  // scratch1 holds the out-param pointer, scratch2 is the ABI-call scratch.
  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  masm.PushRegsInMask(volatileRegs);

  using Fn = void (*)(WeakMapObject*, JSObject*, Value*);
  masm.setupUnalignedABICall(scratch2);
  masm.passABIArg(weakMap);
  masm.passABIArg(obj);
  masm.passABIArg(scratch1);
  masm.callWithABI<Fn, js::WeakMapObject::getObject>();

  masm.PopRegsInMask(volatileRegs);

  // Pop the reserved stack slot (the result Value) into the output.
  masm.Pop(output.valueReg());
  return true;
}
  11584 
// Test whether |objId| is a key of the WeakMap via an ABI call to
// WeakMapObject::hasObject; the boolean result is tagged into the output.
bool CacheIRCompiler::emitWeakMapHasObjectResult(ObjOperandId weakMapId,
                                                 ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register weakMap = allocator.useRegister(masm, weakMapId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // Save volatile registers across the call, except the scratch register
  // that receives the call's result.
  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(scratch);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(WeakMapObject*, JSObject*);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(weakMap);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::WeakMapObject::hasObject>();
  masm.storeCallBoolResult(scratch);

  masm.PopRegsInMask(volatileRegs);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
  11610 
// Test whether |objId| is a member of the WeakSet via an ABI call to
// WeakSetObject::hasObject; the boolean result is tagged into the output.
bool CacheIRCompiler::emitWeakSetHasObjectResult(ObjOperandId weakSetId,
                                                 ObjOperandId objId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);
  Register weakSet = allocator.useRegister(masm, weakSetId);
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  // Save volatile registers across the call, except the scratch register
  // that receives the call's result.
  LiveRegisterSet volatileRegs = liveVolatileRegs();
  volatileRegs.takeUnchecked(scratch);
  masm.PushRegsInMask(volatileRegs);

  using Fn = bool (*)(WeakSetObject*, JSObject*);
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(weakSet);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, js::WeakSetObject::hasObject>();
  masm.storeCallBoolResult(scratch);

  masm.PopRegsInMask(volatileRegs);

  masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
  return true;
}
  11636 
  11637 bool CacheIRCompiler::emitDateFillLocalTimeSlots(ObjOperandId dateId) {
  11638  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11639 
  11640  Register date = allocator.useRegister(masm, dateId);
  11641  AutoScratchRegister scratch(allocator, masm);
  11642 
  11643  masm.dateFillLocalTimeSlots(date, scratch, liveVolatileRegs());
  11644  return true;
  11645 }
  11646 
  11647 bool CacheIRCompiler::emitDateHoursFromSecondsIntoYearResult(
  11648    ValOperandId secondsIntoYearId) {
  11649  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11650 
  11651  AutoOutputRegister output(*this);
  11652  ValueOperand secondsIntoYear =
  11653      allocator.useValueRegister(masm, secondsIntoYearId);
  11654  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  11655  AutoScratchRegister scratch2(allocator, masm);
  11656 
  11657  masm.dateHoursFromSecondsIntoYear(secondsIntoYear, output.valueReg(),
  11658                                    scratch1, scratch2);
  11659  return true;
  11660 }
  11661 
  11662 bool CacheIRCompiler::emitDateMinutesFromSecondsIntoYearResult(
  11663    ValOperandId secondsIntoYearId) {
  11664  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11665 
  11666  AutoOutputRegister output(*this);
  11667  ValueOperand secondsIntoYear =
  11668      allocator.useValueRegister(masm, secondsIntoYearId);
  11669  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  11670  AutoScratchRegister scratch2(allocator, masm);
  11671 
  11672  masm.dateMinutesFromSecondsIntoYear(secondsIntoYear, output.valueReg(),
  11673                                      scratch1, scratch2);
  11674  return true;
  11675 }
  11676 
  11677 bool CacheIRCompiler::emitDateSecondsFromSecondsIntoYearResult(
  11678    ValOperandId secondsIntoYearId) {
  11679  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11680 
  11681  AutoOutputRegister output(*this);
  11682  ValueOperand secondsIntoYear =
  11683      allocator.useValueRegister(masm, secondsIntoYearId);
  11684  AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
  11685  AutoScratchRegister scratch2(allocator, masm);
  11686 
  11687  masm.dateSecondsFromSecondsIntoYear(secondsIntoYear, output.valueReg(),
  11688                                      scratch1, scratch2);
  11689  return true;
  11690 }
  11691 
  11692 bool CacheIRCompiler::emitArrayFromArgumentsObjectResult(ObjOperandId objId,
  11693                                                         uint32_t shapeOffset) {
  11694  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  11695 
  11696  AutoCallVM callvm(masm, this, allocator);
  11697 
  11698  Register obj = allocator.useRegister(masm, objId);
  11699 
  11700  callvm.prepare();
  11701  masm.Push(obj);
  11702 
  11703  using Fn = ArrayObject* (*)(JSContext*, Handle<ArgumentsObject*>);
  11704  callvm.call<Fn, js::ArrayFromArgumentsObject>();
  11705  return true;
  11706 }
  11707 
// Guard that a generation counter still has the value recorded when this
// stub was attached. The stub embeds the expected generation (RawInt32) and
// the address of the live counter (RawPointer); mismatch jumps to the
// failure path.
bool CacheIRCompiler::emitGuardGlobalGeneration(uint32_t expectedOffset,
                                                uint32_t generationAddrOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Load the expected generation value from the stub data.
  StubFieldOffset expected(expectedOffset, StubField::Type::RawInt32);
  emitLoadStubField(expected, scratch);

  // Load the address of the current generation counter from the stub data.
  StubFieldOffset generationAddr(generationAddrOffset,
                                 StubField::Type::RawPointer);
  emitLoadStubField(generationAddr, scratch2);

  // Compare *generationAddr against the expected value.
  masm.branch32(Assembler::NotEqual, Address(scratch2, 0), scratch,
                failure->label());

  return true;
}
  11732 
// Guard on a realm fuse: load the fuse word for |fuseIndex| and jump to the
// failure path if it is non-null (the guard only holds while the loaded
// word is nullptr — presumably meaning the fuse is still intact).
bool CacheIRCompiler::emitGuardFuse(RealmFuses::FuseIndex fuseIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadRealmFuse(fuseIndex, scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmPtr(nullptr),
                 failure->label());
  return true;
}
  11747 
// Guard on a runtime-wide fuse; the actual check is delegated to
// MacroAssembler::guardRuntimeFuse, which jumps to the failure path when
// the guard no longer holds.
bool CacheIRCompiler::emitGuardRuntimeFuse(RuntimeFuses::FuseIndex fuseIndex) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.guardRuntimeFuse(fuseIndex, failure->label());
  return true;
}
  11759 
// Guard that a property tracked by an ObjectFuse is still marked Constant.
// The stub embeds: the fuse's owner object (debug-only consistency check),
// a pointer to the ObjectFuse, the generation observed when the stub was
// attached, and an index + bit mask locating the property's state bits.
bool CacheIRCompiler::emitGuardObjectFuseProperty(
    ObjOperandId objId, uint32_t objFuseOwnerOffset, uint32_t objFuseOffset,
    uint32_t expectedGenerationOffset, uint32_t propIndexOffset,
    uint32_t propMaskOffset, bool canUseFastPath) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

#ifdef DEBUG
  // Assert |obj| is the object this fuse belongs to.
  {
    Label ok;
    StubFieldOffset fuseOwner(objFuseOwnerOffset, StubField::Type::WeakObject);
    emitLoadStubField(fuseOwner, scratch1);
    masm.branchPtr(Assembler::Equal, obj, scratch1, &ok);
    masm.assumeUnreachable("Object doesn't match fuse!");
    masm.bind(&ok);
  }
#else
  (void)obj;
#endif

  // scratch1 := the ObjectFuse*.
  StubFieldOffset objFuse(objFuseOffset, StubField::Type::RawPointer);
  emitLoadStubField(objFuse, scratch1);

  // Fast path for the case where no property has been invalidated. This is
  // very common, especially for prototype objects.
  Label done;
  if (canUseFastPath) {
    masm.branch32(
        Assembler::Equal,
        Address(scratch1, ObjectFuse::offsetOfInvalidatedConstantProperty()),
        Imm32(0), &done);
  }

  // Guard on the generation field.
  StubFieldOffset expectedGeneration(expectedGenerationOffset,
                                     StubField::Type::RawInt32);
  emitLoadStubField(expectedGeneration, scratch2);
  masm.branch32(Assembler::NotEqual,
                Address(scratch1, ObjectFuse::offsetOfGeneration()), scratch2,
                failure->label());

  // Load the uint32_t from the properties array. After the generation check,
  // the property must be marked either Constant or NotConstant so we don't have
  // to bounds check the index.
  StubFieldOffset propIndex(propIndexOffset, StubField::Type::RawInt32);
  emitLoadStubField(propIndex, scratch2);
  masm.loadPtr(Address(scratch1, ObjectFuse::offsetOfPropertyStateBits()),
               scratch1);
  masm.load32(BaseIndex(scratch1, scratch2, TimesFour), scratch1);

  // Use the mask to guard the property is still marked Constant.
  StubFieldOffset propMask(propMaskOffset, StubField::Type::RawInt32);
  emitLoadStubField(propMask, scratch2);
  masm.branchTest32(Assembler::NonZero, scratch1, scratch2, failure->label());

  masm.bind(&done);
  return true;
}
  11826 
// Marker op: the IC compiler emits no code for Bailout.
bool CacheIRCompiler::emitBailout() {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  // Generates no code.

  return true;
}
  11834 
// Debug assertion op; outside IonMonkey it only produces |undefined|.
// |valId| and |mustFloat32| are intentionally unused here.
bool CacheIRCompiler::emitAssertFloat32Result(ValOperandId valId,
                                              bool mustFloat32) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);

  // NOP when not in IonMonkey
  masm.moveValue(UndefinedValue(), output.valueReg());

  return true;
}
  11846 
// Debug assertion op; outside IonMonkey it only produces |undefined|.
// |valId| and |mustBeRecovered| are intentionally unused here.
bool CacheIRCompiler::emitAssertRecoveredOnBailoutResult(ValOperandId valId,
                                                         bool mustBeRecovered) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoOutputRegister output(*this);

  // NOP when not in IonMonkey
  masm.moveValue(UndefinedValue(), output.valueReg());

  return true;
}
  11858 
// Debug helper: call js::jit::AssertPropertyLookup(obj, id, slot) through an
// ABI call, saving/restoring live volatile registers around it. The property
// key and slot number are loaded from the stub data.
bool CacheIRCompiler::emitAssertPropertyLookup(ObjOperandId objId,
                                               uint32_t idOffset,
                                               uint32_t slotOffset) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  Register obj = allocator.useRegister(masm, objId);

  AutoScratchRegister id(allocator, masm);
  AutoScratchRegister slot(allocator, masm);

  LiveRegisterSet save = liveVolatileRegs();
  masm.PushRegsInMask(save);

  // |id| also serves as the ABI-call scratch; its argument value is loaded
  // below, after setup.
  masm.setupUnalignedABICall(id);

  StubFieldOffset idField(idOffset, StubField::Type::Id);
  emitLoadStubField(idField, id);

  StubFieldOffset slotField(slotOffset, StubField::Type::RawInt32);
  emitLoadStubField(slotField, slot);

  masm.passABIArg(obj);
  masm.passABIArg(id);
  masm.passABIArg(slot);
  using Fn = void (*)(NativeObject*, PropertyKey, uint32_t);
  masm.callWithABI<Fn, js::jit::AssertPropertyLookup>();
  masm.PopRegsInMask(save);

  return true;
}
  11889 
#ifdef FUZZING_JS_FUZZILLI
// Fuzzilli instrumentation: compute a hash of |valId| (dispatching on its
// type tag) and fold it into the per-context fuzzer hash state. The op's
// result is always |undefined|.
bool CacheIRCompiler::emitFuzzilliHashResult(ValOperandId valId) {
  JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);

  AutoCallVM callvm(masm, this, allocator);
  const AutoOutputRegister& output = callvm.output();

  ValueOperand input = allocator.useValueRegister(masm, valId);
  AutoScratchRegister scratch(allocator, masm);
  AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
  AutoScratchRegisterMaybeOutputType scratchJSContext(allocator, masm, output);
  AutoScratchFloatRegister floatReg(this);

  Label hashDouble, updateHash, done;

  // Dispatch on the Value's type tag.
  Label isInt32, isDouble, isNull, isUndefined, isBoolean, isBigInt, isObject;
  {
    ScratchTagScope tag(masm, input);
    masm.splitTagForTest(input, tag);

    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestNull(Assembler::Equal, tag, &isNull);
    masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
    masm.branchTestBoolean(Assembler::Equal, tag, &isBoolean);
    masm.branchTestBigInt(Assembler::Equal, tag, &isBigInt);
    masm.branchTestObject(Assembler::Equal, tag, &isObject);

    // Symbol or String.
    masm.move32(Imm32(0), scratch);
    masm.jump(&updateHash);
  }

  // The numeric-ish cases below all funnel into |hashDouble| with the value
  // (or a small case-specific constant) in |floatReg|.
  masm.bind(&isInt32);
  {
    masm.unboxInt32(input, scratch);
    masm.convertInt32ToDouble(scratch, floatReg);
    masm.jump(&hashDouble);
  }

  masm.bind(&isDouble);
  {
    masm.unboxDouble(input, floatReg);
    masm.jump(&hashDouble);
  }

  masm.bind(&isNull);
  {
    masm.loadConstantDouble(1.0, floatReg);
    masm.jump(&hashDouble);
  }

  masm.bind(&isUndefined);
  {
    masm.loadConstantDouble(2.0, floatReg);
    masm.jump(&hashDouble);
  }

  masm.bind(&isBoolean);
  {
    masm.unboxBoolean(input, scratch);
    masm.add32(Imm32(3), scratch);
    masm.convertInt32ToDouble(scratch, floatReg);
    masm.jump(&hashDouble);
  }

  // BigInt: hash via an ABI call to FuzzilliHashBigInt.
  masm.bind(&isBigInt);
  {
    masm.unboxBigInt(input, scratch);

    LiveRegisterSet volatileRegs = liveVolatileRegs();
    masm.PushRegsInMask(volatileRegs);

    using Fn = uint32_t (*)(BigInt* bigInt);
    masm.setupUnalignedABICall(scratchJSContext);
    masm.loadJSContext(scratchJSContext);
    masm.passABIArg(scratch);
    masm.callWithABI<Fn, js::FuzzilliHashBigInt>();
    masm.storeCallInt32Result(scratch);

    // Don't restore the registers holding the hash result and the context.
    LiveRegisterSet ignore;
    ignore.add(scratch);
    ignore.add(scratchJSContext);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
    masm.jump(&updateHash);
  }

  // Object: hash (and store) via a VM call; skips |updateHash|.
  masm.bind(&isObject);
  {
    masm.unboxObject(input, scratch);

    callvm.prepare();
    masm.Push(scratch);

    using Fn = void (*)(JSContext* cx, JSObject* o);
    callvm.callNoResult<Fn, js::FuzzilliHashObject>();

    masm.jump(&done);
  }

  masm.bind(&hashDouble);
  masm.fuzzilliHashDouble(floatReg, scratch, scratch2);

  // Fold the hash in |scratch| into the context's fuzzer hash state.
  masm.bind(&updateHash);
  masm.fuzzilliStoreHash(scratch, scratchJSContext, scratch2);

  masm.bind(&done);

  masm.moveValue(UndefinedValue(), output.valueReg());
  return true;
}
#endif
  12002 
  12003 template <typename Fn, Fn fn>
  12004 void CacheIRCompiler::callVM(MacroAssembler& masm) {
  12005  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  12006  callVMInternal(masm, id);
  12007 }
  12008 
// Emit a call to the VM wrapper trampoline for `id`. A stub frame must
// already have been entered (see the enteredStubFrame_ assertion); the Ion
// path additionally performs the IC-call frame bookkeeping itself.
void CacheIRCompiler::callVMInternal(MacroAssembler& masm, VMFunctionId id) {
  MOZ_ASSERT(enteredStubFrame_);
  if (mode_ == Mode::Ion) {
    TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);
    const VMFunctionData& fun = GetVMFunction(id);
    // Bytes of explicit arguments the caller pushed for this VMFunction.
    uint32_t frameSize = fun.explicitStackSlots() * sizeof(void*);
    masm.Push(FrameDescriptor(FrameType::IonICCall));
    masm.callJit(code);

    // Pop rest of the exit frame and the arguments left on the stack.
    int framePop =
        sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
    masm.implicitPop(frameSize + framePop);

    // Release the Value slots the Ion compiler reserved for local tracing.
    masm.freeStack(asIon()->localTracingSlots() * sizeof(Value));

    // Pop IonICCallFrameLayout.
    masm.Pop(FramePointer);
    masm.freeStack(IonICCallFrameLayout::Size() - sizeof(void*));
    return;
  }

  MOZ_ASSERT(mode_ == Mode::Baseline);

  // Baseline: the shared helper performs the call and frame cleanup.
  TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(id);

  EmitBaselineCallVM(code, masm);
}
  12037 
  12038 bool CacheIRCompiler::isBaseline() { return mode_ == Mode::Baseline; }
  12039 
  12040 bool CacheIRCompiler::isIon() { return mode_ == Mode::Ion; }
  12041 
  12042 BaselineCacheIRCompiler* CacheIRCompiler::asBaseline() {
  12043  MOZ_ASSERT(this->isBaseline());
  12044  return static_cast<BaselineCacheIRCompiler*>(this);
  12045 }
  12046 
  12047 IonCacheIRCompiler* CacheIRCompiler::asIon() {
  12048  MOZ_ASSERT(this->isIon());
  12049  return static_cast<IonCacheIRCompiler*>(this);
  12050 }
  12051 
  12052 #ifdef DEBUG
  12053 void CacheIRCompiler::assertFloatRegisterAvailable(FloatRegister reg) {
  12054  if (isBaseline()) {
  12055    // Baseline does not have any FloatRegisters live when calling an IC stub.
  12056    return;
  12057  }
  12058 
  12059  asIon()->assertFloatRegisterAvailable(reg);
  12060 }
  12061 #endif
  12062 
  12063 AutoCallVM::AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
  12064                       CacheRegisterAllocator& allocator)
  12065    : masm_(masm), compiler_(compiler), allocator_(allocator) {
  12066  // Ion needs to `enterStubFrame` before it can callVM and it also needs to
  12067  // initialize AutoSaveLiveRegisters.
  12068  if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
  12069    // Will need to use a downcast here as well, in order to pass the
  12070    // stub to AutoSaveLiveRegisters
  12071    save_.emplace(*compiler_->asIon());
  12072  }
  12073 
  12074  if (compiler->outputUnchecked_.isSome()) {
  12075    output_.emplace(*compiler);
  12076  }
  12077 
  12078  if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
  12079    stubFrame_.emplace(*compiler_->asBaseline());
  12080    if (output_.isSome()) {
  12081      scratch_.emplace(allocator_, masm_, output_.ref());
  12082    } else {
  12083      scratch_.emplace(allocator_, masm_);
  12084    }
  12085  }
  12086 }
  12087 
  12088 void AutoCallVM::prepare() {
  12089  allocator_.discardStack(masm_);
  12090  MOZ_ASSERT(compiler_ != nullptr);
  12091  if (compiler_->mode_ == CacheIRCompiler::Mode::Ion) {
  12092    compiler_->asIon()->enterStubFrame(masm_, *save_.ptr());
  12093    return;
  12094  }
  12095  MOZ_ASSERT(compiler_->mode_ == CacheIRCompiler::Mode::Baseline);
  12096  stubFrame_->enter(masm_, scratch_.ref());
  12097 }
  12098 
  12099 void AutoCallVM::storeResult(JSValueType returnType) {
  12100  MOZ_ASSERT(returnType != JSVAL_TYPE_DOUBLE);
  12101 
  12102  if (returnType == JSVAL_TYPE_UNKNOWN) {
  12103    masm_.storeCallResultValue(output_.ref());
  12104  } else {
  12105    if (output_->hasValue()) {
  12106      masm_.tagValue(returnType, ReturnReg, output_->valueReg());
  12107    } else {
  12108      masm_.storeCallPointerResult(output_->typedReg().gpr());
  12109    }
  12110  }
  12111 }
  12112 
  12113 void AutoCallVM::leaveBaselineStubFrame() {
  12114  if (compiler_->mode_ == CacheIRCompiler::Mode::Baseline) {
  12115    stubFrame_->leave(masm_);
  12116  }
  12117 }
  12118 
// Trait mapping a VM function pointer type to the C++ type of the value the
// call produces.
template <typename...>
struct VMFunctionReturnType;

template <class R, typename... Args>
struct VMFunctionReturnType<R (*)(JSContext*, Args...)> {
  // Type of the final parameter, which by convention is the out-parameter.
  using LastArgument = typename LastArg<Args...>::Type;

  // By convention VMFunctions returning `bool` use an output parameter.
  using ReturnType =
      std::conditional_t<std::is_same_v<R, bool>, LastArgument, R>;
};
  12130 
// Trait mapping a VMFunction result type (see VMFunctionReturnType) to the
// JSValueType used when storing the call result; unlisted types fail to
// compile, forcing new return types to be added here explicitly.
template <class>
struct ReturnTypeToJSValueType;

// Definitions for the currently used return types.
template <>
struct ReturnTypeToJSValueType<MutableHandleValue> {
  static constexpr JSValueType result = JSVAL_TYPE_UNKNOWN;
};
template <>
struct ReturnTypeToJSValueType<bool*> {
  static constexpr JSValueType result = JSVAL_TYPE_BOOLEAN;
};
template <>
struct ReturnTypeToJSValueType<int32_t*> {
  static constexpr JSValueType result = JSVAL_TYPE_INT32;
};
// All string-like results are stored as JSVAL_TYPE_STRING.
template <>
struct ReturnTypeToJSValueType<JSString*> {
  static constexpr JSValueType result = JSVAL_TYPE_STRING;
};
template <>
struct ReturnTypeToJSValueType<JSLinearString*> {
  static constexpr JSValueType result = JSVAL_TYPE_STRING;
};
template <>
struct ReturnTypeToJSValueType<JSAtom*> {
  static constexpr JSValueType result = JSVAL_TYPE_STRING;
};
template <>
struct ReturnTypeToJSValueType<BigInt*> {
  static constexpr JSValueType result = JSVAL_TYPE_BIGINT;
};
// All object subclasses are stored as JSVAL_TYPE_OBJECT.
template <>
struct ReturnTypeToJSValueType<JSObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PropertyIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<StringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<RegExpStringIteratorObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<PlainObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<ArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<TypedArrayObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<MapObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<SetObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
template <>
struct ReturnTypeToJSValueType<BoundFunctionObject*> {
  static constexpr JSValueType result = JSVAL_TYPE_OBJECT;
};
  12207 
  12208 template <typename Fn>
  12209 void AutoCallVM::storeResult() {
  12210  using ReturnType = typename VMFunctionReturnType<Fn>::ReturnType;
  12211  storeResult(ReturnTypeToJSValueType<ReturnType>::result);
  12212 }
  12213 
  12214 AutoScratchFloatRegister::AutoScratchFloatRegister(CacheIRCompiler* compiler,
  12215                                                   FailurePath* failure)
  12216    : compiler_(compiler), failure_(failure) {
  12217  // If we're compiling a Baseline IC, FloatReg0 is always available.
  12218  if (!compiler_->isBaseline()) {
  12219    MacroAssembler& masm = compiler_->masm;
  12220    masm.push(FloatReg0);
  12221    compiler->allocator.setHasAutoScratchFloatRegisterSpill(true);
  12222  }
  12223 
  12224  if (failure_) {
  12225    failure_->setHasAutoScratchFloatRegister();
  12226  }
  12227 }
  12228 
// Restore FloatReg0 (if it was spilled) and, when a failure path exists,
// emit the out-of-line stub that restores FloatReg0 before jumping to the
// real failure label.
AutoScratchFloatRegister::~AutoScratchFloatRegister() {
  if (failure_) {
    failure_->clearHasAutoScratchFloatRegister();
  }

  if (!compiler_->isBaseline()) {
    MacroAssembler& masm = compiler_->masm;
    masm.pop(FloatReg0);
    compiler_->allocator.setHasAutoScratchFloatRegisterSpill(false);

    if (failure_) {
      // Guards taken while FloatReg0 was spilled jump to failurePopReg_
      // (handed out by failure()); pop the saved register there before
      // continuing to the shared failure path. Fall through skips this.
      Label done;
      masm.jump(&done);
      masm.bind(&failurePopReg_);
      masm.pop(FloatReg0);
      masm.jump(failure_->label());
      masm.bind(&done);
    }
  }
}
  12249 
  12250 Label* AutoScratchFloatRegister::failure() {
  12251  MOZ_ASSERT(failure_);
  12252 
  12253  if (!compiler_->isBaseline()) {
  12254    return &failurePopReg_;
  12255  }
  12256  return failure_->labelUnchecked();
  12257 }