tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

CodeGenerator-loong64.cpp (74962B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/loong64/CodeGenerator-loong64.h"
      8 
      9 #include "mozilla/MathAlgorithms.h"
     10 
     11 #include "jsnum.h"
     12 
     13 #include "jit/CodeGenerator.h"
     14 #include "jit/InlineScriptTree.h"
     15 #include "jit/JitRuntime.h"
     16 #include "jit/MIR-wasm.h"
     17 #include "jit/MIR.h"
     18 #include "jit/MIRGraph.h"
     19 #include "vm/JSContext.h"
     20 #include "vm/Realm.h"
     21 #include "vm/Shape.h"
     22 
     23 #include "jit/shared/CodeGenerator-shared-inl.h"
     24 #include "vm/JSScript-inl.h"
     25 
     26 using namespace js;
     27 using namespace js::jit;
     28 
     29 using JS::GenericNaN;
     30 using mozilla::NegativeInfinity;
     31 
     32 // shared
// Constructor: forwards everything to the shared (platform-independent)
// code generator base. The LoongArch64 backend keeps no extra per-codegen
// state of its own.
CodeGeneratorLOONG64::CodeGeneratorLOONG64(
    MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm,
    const wasm::CodeMetadata* wasmCodeMeta)
    : CodeGeneratorShared(gen, graph, masm, wasmCodeMeta) {}
     37 
     38 Operand CodeGeneratorLOONG64::ToOperand(const LAllocation& a) {
     39  if (a.isGeneralReg()) {
     40    return Operand(a.toGeneralReg()->reg());
     41  }
     42  if (a.isFloatReg()) {
     43    return Operand(a.toFloatReg()->reg());
     44  }
     45  return Operand(ToAddress(a));
     46 }
     47 
// Pointer overload: delegates to the reference overload above.
Operand CodeGeneratorLOONG64::ToOperand(const LAllocation* a) {
  return ToOperand(*a);
}
     51 
// Definition overload: converts the definition's output allocation.
Operand CodeGeneratorLOONG64::ToOperand(const LDefinition* def) {
  return ToOperand(def->output());
}
     55 
     56 void CodeGeneratorLOONG64::branchToBlock(Assembler::FloatFormat fmt,
     57                                         FloatRegister lhs, FloatRegister rhs,
     58                                         MBasicBlock* mir,
     59                                         Assembler::DoubleCondition cond) {
     60  // Skip past trivial blocks.
     61  Label* label = skipTrivialBlocks(mir)->lir()->label();
     62  if (fmt == Assembler::DoubleFloat) {
     63    masm.branchDouble(cond, lhs, rhs, label);
     64  } else {
     65    masm.branchFloat(cond, lhs, rhs, label);
     66  }
     67 }
     68 
     69 MoveOperand CodeGeneratorLOONG64::toMoveOperand(LAllocation a) const {
     70  if (a.isGeneralReg()) {
     71    return MoveOperand(ToRegister(a));
     72  }
     73  if (a.isFloatReg()) {
     74    return MoveOperand(ToFloatRegister(a));
     75  }
     76  MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
     77                                           : MoveOperand::Kind::Memory;
     78  Address address = ToAddress(a);
     79  MOZ_ASSERT((address.offset & 3) == 0);
     80 
     81  return MoveOperand(address, kind);
     82 }
     83 
// Bind |label| to an out-of-line path that bails out of Ion code, using
// |snapshot| to describe how to reconstruct the interpreter state.
void CodeGeneratorLOONG64::bailoutFrom(Label* label, LSnapshot* snapshot) {
  // The label must have pending uses and must not already be bound, since
  // we are about to retarget it at the out-of-line code.
  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  // Serialize the snapshot so snapshotOffset() is valid below.
  encode(snapshot);

  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    // Push snapshotOffset and make sure stack is aligned.
    masm.subPtr(Imm32(sizeof(Value)), StackPointer);
    masm.storePtr(ImmWord(snapshot->snapshotOffset()),
                  Address(StackPointer, 0));

    // All bailout paths funnel through the shared deopt label; see
    // generateOutOfLineCode for the code bound there.
    masm.jump(&deoptLabel_);
  });
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  // Redirect every use of |label| to the out-of-line entry point.
  masm.retarget(label, ool->entry());
}
    104 
// Unconditionally bail out: emit a jump to a fresh label and route that
// label through the out-of-line bailout machinery.
void CodeGeneratorLOONG64::bailout(LSnapshot* snapshot) {
  Label label;
  masm.jump(&label);
  bailoutFrom(&label, snapshot);
}
    110 
// Emit all out-of-line code and then, if any bailout path was generated,
// the shared tail that transfers control to the generic bailout handler.
// Returns false on OOM.
bool CodeGeneratorLOONG64::generateOutOfLineCode() {
  if (!CodeGeneratorShared::generateOutOfLineCode()) {
    return false;
  }

  if (deoptLabel_.used()) {
    // All non-table-based bailouts will go here.
    masm.bind(&deoptLabel_);

    // Push the frame size, so the handler can recover the IonScript.
    // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk
    // We have to use 'ra' because generateBailoutTable will implicitly do
    // the same.
    masm.move32(Imm32(frameSize()), ra);

    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
    masm.jump(handler);
  }

  return !masm.oom();
}
    132 
// Out-of-line jump table for an MTableSwitch: holds the CodeLabel marking
// the start of the table of absolute case addresses that is emitted after
// the main instruction stream (see visitOutOfLineTableSwitch).
class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorLOONG64> {
  MTableSwitch* mir_;
  // Location of the jump table in the instruction stream.
  CodeLabel jumpLabel_;

  void accept(CodeGeneratorLOONG64* codegen) {
    codegen->visitOutOfLineTableSwitch(this);
  }

 public:
  explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}

  MTableSwitch* mir() const { return mir_; }

  CodeLabel* jumpLabel() { return &jumpLabel_; }
};
    149 
// Emit the dispatch sequence for a table switch: normalize and range-check
// the index, then perform an indirect jump through a table of absolute case
// addresses (the table itself is emitted by visitOutOfLineTableSwitch).
void CodeGeneratorLOONG64::emitTableSwitchDispatch(MTableSwitch* mir,
                                                   Register index,
                                                   Register base) {
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  // Lower value with low value
  if (mir->low() != 0) {
    masm.subPtr(Imm32(mir->low()), index);
  }

  // Jump to default case if input is out of range
  int32_t cases = mir->numCases();
  masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);

  // To fill in the CodeLabels for the case entries, we need to first
  // generate the case entries (we don't yet know their offsets in the
  // instruction stream).
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
  addOutOfLineCode(ool, mir);

  // Compute the position where a pointer to the right case stands.
  masm.ma_li(base, ool->jumpLabel());

  // Table entries are pointer-sized, hence the ScalePointer index scale.
  BaseIndex pointer(base, index, ScalePointer);

  // Jump to the right case
  masm.branchToComputedAddress(pointer);
}
    178 
// Shared implementation for wasm loads, templated over the LIR node type.
// If the pointer is a 32-bit value it is first zero-extended into a scratch
// register so the macro assembler always sees a full 64-bit address.
template <typename T>
void CodeGeneratorLOONG64::emitWasmLoad(T* lir) {
  const MWasmLoad* mir = lir->mir();
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();

  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptr = ToRegister(lir->ptr());
  Register ptrScratch = ToTempRegisterOrInvalid(lir->temp0());

  if (mir->base()->type() == MIRType::Int32) {
    // Zero-extend the 32-bit index and keep ptrScratch consistent with the
    // re-pointed ptr register.
    masm.move32To64ZeroExtend(ptr, Register64(scratch));
    ptr = scratch;
    ptrScratch = ptrScratch != InvalidReg ? scratch : InvalidReg;
  }

  // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
  // true 64-bit value.
  masm.wasmLoad(mir->access(), memoryBase, ptr, ptrScratch,
                ToAnyRegister(lir->output()));
}
    200 
// Shared implementation for wasm stores, templated over the LIR node type.
// Mirrors emitWasmLoad: a 32-bit pointer is zero-extended into a scratch
// register before being handed to the macro assembler.
template <typename T>
void CodeGeneratorLOONG64::emitWasmStore(T* lir) {
  const MWasmStore* mir = lir->mir();
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();

  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptr = ToRegister(lir->ptr());
  Register ptrScratch = ToTempRegisterOrInvalid(lir->temp0());

  if (mir->base()->type() == MIRType::Int32) {
    // Zero-extend the 32-bit index and keep ptrScratch consistent with the
    // re-pointed ptr register.
    masm.move32To64ZeroExtend(ptr, Register64(scratch));
    ptr = scratch;
    ptrScratch = ptrScratch != InvalidReg ? scratch : InvalidReg;
  }

  // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
  // true 64-bit value.
  masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), memoryBase, ptr,
                 ptrScratch);
}
    222 
// Emit the invalidation epilogue: padding for OsiPoint patching, then code
// that pushes the return address and a patchable IonScript pointer before
// jumping to the invalidation thunk.
void CodeGeneratorLOONG64::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint
  // patching to occur. Otherwise, we could overwrite the invalidation
  // epilogue
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }

  masm.bind(&invalidate_);

  // Push the return address of the point that we bailed out at to the stack
  masm.Push(ra);

  // Push the Ion script onto the stack (when we determine what that
  // pointer is).
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();

  masm.jump(thunk);
}
    245 
// Emit the jump table for a table switch: one absolute code pointer per
// case, each bound to its (trivial-block-skipped) case header and patched
// after codegen finishes.
void CodeGeneratorLOONG64::visitOutOfLineTableSwitch(
    OutOfLineTableSwitch* ool) {
  MTableSwitch* mir = ool->mir();

  // Align the table so each pointer-sized entry is naturally aligned.
  masm.haltingAlign(sizeof(void*));
  masm.bind(ool->jumpLabel());
  masm.addCodeLabel(*ool->jumpLabel());

  for (size_t i = 0; i < mir->numCases(); i++) {
    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
    Label* caseheader = caseblock->label();
    uint32_t caseoffset = caseheader->offset();

    // The entries of the jump table need to be absolute addresses and thus
    // must be patched after codegen is finished.
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    cl.target()->bind(caseoffset);
    masm.addCodeLabel(cl);
  }
}
    267 
    268 void CodeGeneratorLOONG64::visitOutOfLineWasmTruncateCheck(
    269    OutOfLineWasmTruncateCheck* ool) {
    270  if (ool->toType() == MIRType::Int32) {
    271    masm.outOfLineWasmTruncateToInt32Check(ool->input(), ool->output(),
    272                                           ool->fromType(), ool->flags(),
    273                                           ool->rejoin(), ool->trapSiteDesc());
    274  } else {
    275    MOZ_ASSERT(ool->toType() == MIRType::Int64);
    276    masm.outOfLineWasmTruncateToInt64Check(ool->input(), ool->output64(),
    277                                           ool->fromType(), ool->flags(),
    278                                           ool->rejoin(), ool->trapSiteDesc());
    279  }
    280 }
    281 
    282 void CodeGenerator::visitBox(LBox* box) {
    283  const LAllocation* in = box->payload();
    284  ValueOperand result = ToOutValue(box);
    285 
    286  masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
    287 }
    288 
// Unbox a Value into a typed register. Fallible unboxes verify the value's
// tag and bail out on mismatch; infallible ones simply strip the tag. The
// infallible input may live in a register or in a stack slot.
void CodeGenerator::visitUnbox(LUnbox* unbox) {
  MUnbox* mir = unbox->mir();

  Register result = ToRegister(unbox->output());

  if (mir->fallible()) {
    // Fallible path: each helper checks the tag and branches to |bail| on
    // mismatch, which is wired to the bailout machinery below.
    ValueOperand value = ToValue(unbox->input());
    Label bail;
    switch (mir->type()) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(value, result, &bail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(value, result, &bail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(value, result, &bail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(value, result, &bail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(value, result, &bail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(value, result, &bail);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    bailoutFrom(&bail, unbox->snapshot());
    return;
  }

  // Infallible path with a register input: unbox directly.
  LAllocation* input = unbox->getOperand(LUnbox::Input);
  if (input->isGeneralReg()) {
    Register inputReg = ToRegister(input);
    switch (mir->type()) {
      case MIRType::Int32:
        masm.unboxInt32(inputReg, result);
        break;
      case MIRType::Boolean:
        masm.unboxBoolean(inputReg, result);
        break;
      case MIRType::Object:
        masm.unboxObject(inputReg, result);
        break;
      case MIRType::String:
        masm.unboxString(inputReg, result);
        break;
      case MIRType::Symbol:
        masm.unboxSymbol(inputReg, result);
        break;
      case MIRType::BigInt:
        masm.unboxBigInt(inputReg, result);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    return;
  }

  // Infallible path with a memory input: unbox straight from the slot.
  Address inputAddr = ToAddress(input);
  switch (mir->type()) {
    case MIRType::Int32:
      masm.unboxInt32(inputAddr, result);
      break;
    case MIRType::Boolean:
      masm.unboxBoolean(inputAddr, result);
      break;
    case MIRType::Object:
      masm.unboxObject(inputAddr, result);
      break;
    case MIRType::String:
      masm.unboxString(inputAddr, result);
      break;
    case MIRType::Symbol:
      masm.unboxSymbol(inputAddr, result);
      break;
    case MIRType::BigInt:
      masm.unboxBigInt(inputAddr, result);
      break;
    default:
      MOZ_CRASH("Given MIRType cannot be unboxed.");
  }
}
    375 
// 64-bit signed division/modulus for wasm. Emits explicit guards for the
// two conditions wasm requires traps (or defined results) for:
// divide-by-zero and the INT64_MIN / -1 overflow case.
void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());
  Register output = ToRegister(lir->output());

  Label done;

  // Handle divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->trapSiteDesc());
    masm.bind(&nonZero);
  }

  // Handle an integer overflow exception from INT64_MIN / -1.
  if (lir->canBeNegativeOverflow()) {
    Label notOverflow;
    masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
    masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
    if (lir->mir()->isMod()) {
      // For mod, INT64_MIN % -1 produces 0: clear the output via xor.
      masm.as_xor(output, output, output);
    } else {
      masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->trapSiteDesc());
    }
    masm.jump(&done);
    masm.bind(&notOverflow);
  }

  // Normal case: a single hardware divide or modulus instruction.
  if (lir->mir()->isMod()) {
    masm.as_mod_d(output, lhs, rhs);
  } else {
    masm.as_div_d(output, lhs, rhs);
  }

  masm.bind(&done);
}
    413 
// 64-bit unsigned division/modulus for wasm. Only divide-by-zero needs a
// guard; unsigned division has no overflow case.
void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());
  Register output = ToRegister(lir->output());

  // NOTE(review): nothing in this function jumps to |done|; it appears to
  // exist only for symmetry with the signed variant above.
  Label done;

  // Prevent divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->trapSiteDesc());
    masm.bind(&nonZero);
  }

  if (lir->mir()->isMod()) {
    masm.as_mod_du(output, lhs, rhs);
  } else {
    masm.as_div_du(output, lhs, rhs);
  }

  masm.bind(&done);
}
    437 
// Emit a pointer-sized signed division for BigInt arithmetic.
void CodeGeneratorLOONG64::emitBigIntPtrDiv(LBigIntPtrDiv* ins,
                                            Register dividend, Register divisor,
                                            Register output) {
  // Callers handle division by zero and integer overflow.
  masm.as_div_d(/* result= */ output, dividend, divisor);
}
    444 
// Emit a pointer-sized signed modulus for BigInt arithmetic.
void CodeGeneratorLOONG64::emitBigIntPtrMod(LBigIntPtrMod* ins,
                                            Register dividend, Register divisor,
                                            Register output) {
  // Callers handle division by zero and integer overflow.
  masm.as_mod_d(/* result= */ output, dividend, divisor);
}
    451 
// Wasm 64-bit load: zero-extend a 32-bit pointer in place if needed, then
// delegate to the macro assembler.
void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
  const MWasmLoad* mir = lir->mir();

  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptrScratch = ToTempRegisterOrInvalid(lir->temp0());

  Register ptrReg = ToRegister(lir->ptr());
  if (mir->base()->type() == MIRType::Int32) {
    // See comment in visitWasmLoad re the type of 'base'.
    masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
  }

  masm.wasmLoadI64(mir->access(), memoryBase, ptrReg, ptrScratch,
                   ToOutRegister64(lir));
}
    467 
// Wasm 64-bit store: zero-extend a 32-bit pointer in place if needed, then
// delegate to the macro assembler.
void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
  const MWasmStore* mir = lir->mir();

  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptrScratch = ToTempRegisterOrInvalid(lir->temp0());

  Register ptrReg = ToRegister(lir->ptr());
  if (mir->base()->type() == MIRType::Int32) {
    // See comment in visitWasmLoad re the type of 'base'.
    masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
  }

  masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), memoryBase,
                    ptrReg, ptrScratch);
}
    483 
// Wasm select on int64: the true-expression value already occupies the
// output register (reuse policy asserted below); conditionally overwrite
// it with the false expression when |cond| is zero.
void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

  Register cond = ToRegister(lir->condExpr());
  LInt64Allocation falseExpr = lir->falseExpr();

  Register64 out = ToOutRegister64(lir);
  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
             "true expr is reused for input");

  if (falseExpr.value().isGeneralReg()) {
    // Register case: a single conditional move suffices.
    masm.moveIfZero(out.reg, ToRegister(falseExpr.value()), cond);
  } else {
    // Memory case: branch over the load when the condition is non-zero,
    // i.e. when the true value should be kept.
    Label done;
    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
    masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
    masm.bind(&done);
  }
}
    503 
    504 void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
    505  const LAllocation* input = lir->input();
    506  Register output = ToRegister(lir->output());
    507 
    508  if (lir->mir()->isUnsigned()) {
    509    masm.as_bstrpick_d(output, ToRegister(input), 31, 0);
    510  } else {
    511    masm.as_slli_w(output, ToRegister(input), 0);
    512  }
    513 }
    514 
    515 void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
    516  LInt64Allocation input = lir->input();
    517  Register output = ToRegister(lir->output());
    518 
    519  if (lir->mir()->bottomHalf()) {
    520    if (input.value().isMemory()) {
    521      masm.load32(ToAddress(input), output);
    522    } else {
    523      masm.move64To32(ToRegister64(input), output);
    524    }
    525  } else {
    526    MOZ_CRASH("Not implemented.");
    527  }
    528 }
    529 
// Sign-extend the low 8/16/32 bits of a 64-bit value across the full
// register width.
void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
  Register64 input = ToRegister64(lir->input());
  Register64 output = ToOutRegister64(lir);
  switch (lir->mir()->mode()) {
    case MSignExtendInt64::Byte:
      // Sign-extend the low 32 bits first, then the low byte within them.
      masm.move32To64SignExtend(input.reg, output);
      masm.move8SignExtend(output.reg, output.reg);
      break;
    case MSignExtendInt64::Half:
      masm.move32To64SignExtend(input.reg, output);
      masm.move16SignExtend(output.reg, output.reg);
      break;
    case MSignExtendInt64::Word:
      masm.move32To64SignExtend(input.reg, output);
      break;
  }
}
    547 
    548 void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
    549  Register input = ToRegister(lir->input());
    550  Register output = ToRegister(lir->output());
    551  MOZ_ASSERT(input == output);
    552  masm.move32To64ZeroExtend(input, Register64(output));
    553 }
    554 
    555 void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
    556  Register input = ToRegister(lir->input());
    557  Register output = ToRegister(lir->output());
    558  MOZ_ASSERT(input == output);
    559  masm.move64To32(Register64(input), output);
    560 }
    561 
// Truncate a float/double to int64 for wasm. The fast-path conversion is
// emitted inline; failures (NaN, out-of-range) divert to the out-of-line
// check which either traps or produces the saturated result.
void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  MWasmTruncateToInt64* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  Label* oolEntry = ool->entry();
  Label* oolRejoin = ool->rejoin();
  bool isSaturating = mir->isSaturating();

  // Dispatch on source type and signedness; no temporary float register is
  // needed on this platform (hence InvalidFloatReg).
  if (fromType == MIRType::Double) {
    if (mir->isUnsigned()) {
      masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
                                      oolRejoin, InvalidFloatReg);
    } else {
      masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
                                     oolRejoin, InvalidFloatReg);
    }
  } else {
    if (mir->isUnsigned()) {
      masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
                                       oolRejoin, InvalidFloatReg);
    } else {
      masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
                                      oolRejoin, InvalidFloatReg);
    }
  }
}
    596 
    597 void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
    598  Register64 input = ToRegister64(lir->input());
    599  FloatRegister output = ToFloatRegister(lir->output());
    600 
    601  MIRType outputType = lir->mir()->type();
    602  MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
    603 
    604  if (outputType == MIRType::Double) {
    605    if (lir->mir()->isUnsigned()) {
    606      masm.convertUInt64ToDouble(input, output, Register::Invalid());
    607    } else {
    608      masm.convertInt64ToDouble(input, output);
    609    }
    610  } else {
    611    if (lir->mir()->isUnsigned()) {
    612      masm.convertUInt64ToFloat32(input, output, Register::Invalid());
    613    } else {
    614      masm.convertInt64ToFloat32(input, output);
    615    }
    616  }
    617 }
    618 
    619 void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
    620  FloatRegister first = ToFloatRegister(ins->first());
    621  FloatRegister second = ToFloatRegister(ins->second());
    622 
    623  MOZ_ASSERT(first == ToFloatRegister(ins->output()));
    624 
    625  if (ins->mir()->isMax()) {
    626    masm.maxDouble(second, first, true);
    627  } else {
    628    masm.minDouble(second, first, true);
    629  }
    630 }
    631 
    632 void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
    633  FloatRegister first = ToFloatRegister(ins->first());
    634  FloatRegister second = ToFloatRegister(ins->second());
    635 
    636  MOZ_ASSERT(first == ToFloatRegister(ins->output()));
    637 
    638  if (ins->mir()->isMax()) {
    639    masm.maxFloat32(second, first, true);
    640  } else {
    641    masm.minFloat32(second, first, true);
    642  }
    643 }
    644 
    645 void CodeGenerator::visitAddI(LAddI* ins) {
    646  const LAllocation* lhs = ins->lhs();
    647  const LAllocation* rhs = ins->rhs();
    648  const LDefinition* dest = ins->output();
    649 
    650  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
    651 
    652  // If there is no snapshot, we don't need to check for overflow
    653  if (!ins->snapshot()) {
    654    if (rhs->isConstant()) {
    655      masm.ma_add_w(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    656    } else {
    657      masm.as_add_w(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    658    }
    659    return;
    660  }
    661 
    662  Label overflow;
    663  if (rhs->isConstant()) {
    664    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
    665                              Imm32(ToInt32(rhs)), &overflow);
    666  } else {
    667    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
    668                              ToRegister(rhs), &overflow);
    669  }
    670 
    671  bailoutFrom(&overflow, ins->snapshot());
    672 }
    673 
    674 void CodeGenerator::visitAddIntPtr(LAddIntPtr* ins) {
    675  Register lhs = ToRegister(ins->lhs());
    676  const LAllocation* rhs = ins->rhs();
    677  Register dest = ToRegister(ins->output());
    678 
    679  if (rhs->isConstant()) {
    680    masm.ma_add_d(dest, lhs, ImmWord(ToIntPtr(rhs)));
    681  } else {
    682    masm.as_add_d(dest, lhs, ToRegister(rhs));
    683  }
    684 }
    685 
    686 void CodeGenerator::visitAddI64(LAddI64* lir) {
    687  Register lhs = ToRegister64(lir->lhs()).reg;
    688  LInt64Allocation rhs = lir->rhs();
    689  Register dest = ToOutRegister64(lir).reg;
    690 
    691  if (IsConstant(rhs)) {
    692    masm.ma_add_d(dest, lhs, ImmWord(ToInt64(rhs)));
    693  } else {
    694    masm.as_add_d(dest, lhs, ToRegister64(rhs).reg);
    695  }
    696 }
    697 
    698 void CodeGenerator::visitSubI(LSubI* ins) {
    699  const LAllocation* lhs = ins->lhs();
    700  const LAllocation* rhs = ins->rhs();
    701  const LDefinition* dest = ins->output();
    702 
    703  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
    704 
    705  // If there is no snapshot, we don't need to check for overflow
    706 
    707  if (!ins->snapshot()) {
    708    if (rhs->isConstant()) {
    709      masm.ma_sub_w(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    710    } else {
    711      masm.as_sub_w(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    712    }
    713    return;
    714  }
    715 
    716  Label overflow;
    717  if (rhs->isConstant()) {
    718    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
    719                              Imm32(ToInt32(rhs)), &overflow);
    720  } else {
    721    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
    722                              ToRegister(rhs), &overflow);
    723  }
    724 
    725  bailoutFrom(&overflow, ins->snapshot());
    726 }
    727 
    728 void CodeGenerator::visitSubIntPtr(LSubIntPtr* ins) {
    729  Register lhs = ToRegister(ins->lhs());
    730  const LAllocation* rhs = ins->rhs();
    731  Register dest = ToRegister(ins->output());
    732 
    733  if (rhs->isConstant()) {
    734    masm.ma_sub_d(dest, lhs, ImmWord(ToIntPtr(rhs)));
    735  } else {
    736    masm.as_sub_d(dest, lhs, ToRegister(rhs));
    737  }
    738 }
    739 
    740 void CodeGenerator::visitSubI64(LSubI64* lir) {
    741  Register lhs = ToRegister64(lir->lhs()).reg;
    742  LInt64Allocation rhs = lir->rhs();
    743  Register dest = ToOutRegister64(lir).reg;
    744 
    745  if (IsConstant(rhs)) {
    746    masm.ma_sub_d(dest, lhs, ImmWord(ToInt64(rhs)));
    747  } else {
    748    masm.as_sub_d(dest, lhs, ToRegister64(rhs).reg);
    749  }
    750 }
    751 
// 32-bit integer multiplication with optional overflow and negative-zero
// bailouts. Constant right-hand sides are strength-reduced (negate, clear,
// move, shifts, shift+add) where safe; otherwise a hardware multiply with
// an overflow test is emitted.
void CodeGenerator::visitMulI(LMulI* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());
  MMul* mul = ins->mir();

  // Integer-mode multiplies must have been proven free of -0 and overflow.
  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  if (rhs->isConstant()) {
    int32_t constant = ToInt32(rhs);

    // Bailout on -0.0
    // (lhs < 0 with constant 0, or lhs == 0 with a negative constant,
    // would produce -0, which requires a double result.)
    if (mul->canBeNegativeZero() && constant <= 0) {
      Assembler::Condition cond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      bailoutCmp32(cond, lhs, Imm32(0), ins->snapshot());
    }

    // Special-case small constants.
    switch (constant) {
      case -1:
        // Negation overflows only for INT32_MIN.
        if (mul->canOverflow()) {
          bailoutCmp32(Assembler::Equal, lhs, Imm32(INT32_MIN),
                       ins->snapshot());
        }

        masm.as_sub_w(dest, zero, lhs);
        return;
      case 0:
        masm.move32(zero, dest);
        return;
      case 1:
        masm.move32(lhs, dest);
        return;
      case 2:
        // x * 2 == x + x, with an overflow test if required.
        if (mul->canOverflow()) {
          Label mulTwoOverflow;
          masm.ma_add32TestOverflow(dest, lhs, lhs, &mulTwoOverflow);

          bailoutFrom(&mulTwoOverflow, ins->snapshot());
        } else {
          masm.as_add_w(dest, lhs, lhs);
        }
        return;
    }

    if (constant > 0) {
      uint32_t shift = mozilla::FloorLog2(constant);

      if (!mul->canOverflow()) {
        // If it cannot overflow, we can do lots of optimizations.

        // See if the constant has one bit set, meaning it can be
        // encoded as a bitshift.
        if ((1 << shift) == constant) {
          masm.as_slli_w(dest, lhs, shift);
          return;
        }

        // If the constant cannot be encoded as (1<<C1), see if it can
        // be encoded as (1<<C1) | (1<<C2), which can be computed
        // using an add and a shift.
        uint32_t rest = constant - (1 << shift);
        uint32_t shift_rest = mozilla::FloorLog2(rest);
        if ((1u << shift_rest) == rest) {
          UseScratchRegisterScope temps(masm);
          Register scratch = temps.Acquire();

          // dest = ((lhs << (C1-C2)) + lhs) << C2
          masm.as_slli_w(scratch, lhs, (shift - shift_rest));
          masm.as_add_w(dest, scratch, lhs);
          if (shift_rest != 0) {
            masm.as_slli_w(dest, dest, shift_rest);
          }
          return;
        }
      } else {
        // To stay on the safe side, only optimize things that are a power of 2.
        if ((1 << shift) == constant) {
          UseScratchRegisterScope temps(masm);
          Register scratch = temps.Acquire();

          // dest = lhs * pow(2, shift)
          masm.as_slli_d(dest, lhs, shift);

          // At runtime, check (dest >> shift == intptr_t(dest) >> shift), if
          // this does not hold, some bits were lost due to overflow, and the
          // computation should be resumed as a double.
          masm.as_slli_w(scratch, dest, 0);
          bailoutCmp32(Assembler::NotEqual, dest, scratch, ins->snapshot());
          return;
        }
      }
    }

    // General constant case: multiply, with an overflow test if required.
    if (mul->canOverflow()) {
      Label mulConstOverflow;
      masm.ma_mul32TestOverflow(dest, lhs, Imm32(constant), &mulConstOverflow);

      bailoutFrom(&mulConstOverflow, ins->snapshot());
    } else {
      masm.ma_mul(dest, lhs, Imm32(constant));
    }
  } else {
    // Register rhs: multiply, with an overflow test if required.
    if (mul->canOverflow()) {
      Label multRegOverflow;
      masm.ma_mul32TestOverflow(dest, lhs, ToRegister(rhs), &multRegOverflow);

      bailoutFrom(&multRegOverflow, ins->snapshot());
    } else {
      masm.as_mul_w(dest, lhs, ToRegister(rhs));
    }

    if (mul->canBeNegativeZero()) {
      Label done;
      masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);

      // Result is -0 if lhs or rhs is negative.
      // In that case result must be double value so bailout
      UseScratchRegisterScope temps(masm);
      Register scratch = temps.Acquire();
      masm.as_or(scratch, lhs, ToRegister(rhs));
      bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());

      masm.bind(&done);
    }
  }
}
    879 
    880 void CodeGeneratorLOONG64::emitMulI64(Register lhs, int64_t rhs,
    881                                      Register dest) {
    882  switch (rhs) {
    883    case -1:
    884      masm.as_sub_d(dest, zero, lhs);
    885      return;
    886    case 0:
    887      masm.movePtr(zero, dest);
    888      return;
    889    case 1:
    890      if (dest != lhs) {
    891        masm.movePtr(lhs, dest);
    892      }
    893      return;
    894    case 2:
    895      masm.as_add_d(dest, lhs, lhs);
    896      return;
    897  }
    898 
    899  if (rhs > 0) {
    900    if (mozilla::IsPowerOfTwo(static_cast<uint64_t>(rhs + 1))) {
    901      int32_t shift = mozilla::FloorLog2(rhs + 1);
    902 
    903      UseScratchRegisterScope temps(masm);
    904      Register savedLhs = lhs;
    905      if (dest == lhs) {
    906        savedLhs = temps.Acquire();
    907        masm.movePtr(lhs, savedLhs);
    908      }
    909      masm.as_slli_d(dest, lhs, shift);
    910      masm.as_sub_d(dest, dest, savedLhs);
    911      return;
    912    }
    913 
    914    if (mozilla::IsPowerOfTwo(static_cast<uint64_t>(rhs - 1))) {
    915      int32_t shift = mozilla::FloorLog2(rhs - 1);
    916      if (shift < 5) {
    917        masm.as_alsl_d(dest, lhs, lhs, shift - 1);
    918      } else {
    919        UseScratchRegisterScope temps(masm);
    920        Register savedLhs = lhs;
    921        if (dest == lhs) {
    922          savedLhs = temps.Acquire();
    923          masm.movePtr(lhs, savedLhs);
    924        }
    925        masm.as_slli_d(dest, lhs, shift);
    926        masm.as_add_d(dest, dest, savedLhs);
    927      }
    928      return;
    929    }
    930 
    931    // Use shift if constant is power of 2.
    932    int32_t shift = mozilla::FloorLog2(rhs);
    933    if (int64_t(1) << shift == rhs) {
    934      masm.as_slli_d(dest, lhs, shift);
    935      return;
    936    }
    937  }
    938 
    939  masm.ma_mul_d(dest, lhs, ImmWord(rhs));
    940 }
    941 
    942 void CodeGenerator::visitMulIntPtr(LMulIntPtr* ins) {
    943  Register lhs = ToRegister(ins->lhs());
    944  const LAllocation* rhs = ins->rhs();
    945  Register dest = ToRegister(ins->output());
    946 
    947  if (rhs->isConstant()) {
    948    emitMulI64(lhs, ToIntPtr(rhs), dest);
    949  } else {
    950    masm.as_mul_d(dest, lhs, ToRegister(rhs));
    951  }
    952 }
    953 
    954 void CodeGenerator::visitMulI64(LMulI64* lir) {
    955  Register lhs = ToRegister64(lir->lhs()).reg;
    956  LInt64Allocation rhs = lir->rhs();
    957  Register dest = ToOutRegister64(lir).reg;
    958 
    959  if (IsConstant(rhs)) {
    960    emitMulI64(lhs, ToInt64(rhs), dest);
    961  } else {
    962    masm.as_mul_d(dest, lhs, ToRegister64(rhs).reg);
    963  }
    964 }
    965 
void CodeGenerator::visitDivI(LDivI* ins) {
  // Signed 32-bit division with checks for division by zero,
  // INT32_MIN / -1 overflow, negative zero, and (when the result may not
  // be truncated) a non-zero remainder.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());
  MDiv* mir = ins->mir();

  Label done;

  // Handle divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      // Wasm semantics: division by zero traps.
      Label nonZero;
      masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
      masm.bind(&nonZero);
    } else if (mir->canTruncateInfinities()) {
      // Truncated division by zero is zero (Infinity|0 == 0)
      Label notzero;
      masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&notzero);
    } else {
      // Otherwise the result is not an int32; bail out.
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Handle an integer overflow exception from -2147483648 / -1.
  if (mir->canBeNegativeOverflow()) {
    Label notMinInt;
    masm.move32(Imm32(INT32_MIN), temp);
    masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);

    masm.move32(Imm32(-1), temp);
    if (mir->trapOnError()) {
      // Wasm semantics: INT32_MIN / -1 traps.
      Label ok;
      masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->trapSiteDesc());
      masm.bind(&ok);
    } else if (mir->canTruncateOverflow()) {
      // (-INT32_MIN)|0 == INT32_MIN
      Label skip;
      masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(INT32_MIN), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
    }
    masm.bind(&notMinInt);
  }

  // Handle negative 0. (0/-Y)
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    // 0 / negative yields -0, which requires a double; bail out.
    Label nonzero;
    masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
    bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
    masm.bind(&nonzero);
  }

  // All regular. Lets call div.
  if (mir->canTruncateRemainder()) {
    masm.as_div_w(dest, lhs, rhs);
  } else {
    MOZ_ASSERT(mir->fallible());

    // A non-zero remainder means the true result is not an int32.
    masm.as_mod_w(temp, lhs, rhs);
    bailoutCmp32(Assembler::NonZero, temp, temp, ins->snapshot());

    masm.as_div_w(dest, lhs, rhs);
  }

  masm.bind(&done);
}
   1043 
void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  // Signed division by 2^shift, implemented with an arithmetic shift and a
  // rounding adjustment for negative numerators.
  Register lhs = ToRegister(ins->numerator());
  Register dest = ToRegister(ins->output());
  Register tmp = ToRegister(ins->temp0());
  int32_t shift = ins->shift();
  MOZ_ASSERT(0 <= shift && shift <= 31);

  if (shift != 0) {
    MDiv* mir = ins->mir();
    if (!mir->isTruncated()) {
      // If the remainder is going to be != 0, bailout since this must
      // be a double.
      masm.as_slli_w(tmp, lhs, (32 - shift));
      bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
    }

    if (!mir->canBeNegativeDividend()) {
      // Numerator is unsigned, so needs no adjusting. Do the shift.
      masm.as_srai_w(dest, lhs, shift);
      return;
    }

    // Adjust the value so that shifting produces a correctly rounded result
    // when the numerator is negative. See 10-1 "Signed Division by a Known
    // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
    if (shift > 1) {
      // tmp = (lhs < 0) ? 2^shift - 1 : 0, added as the rounding bias.
      masm.as_srai_w(tmp, lhs, 31);
      masm.as_srli_w(tmp, tmp, (32 - shift));
      masm.add32(lhs, tmp);
    } else {
      // shift == 1: the sign bit itself is the bias.
      masm.as_srli_w(tmp, lhs, (32 - shift));
      masm.add32(lhs, tmp);
    }

    // Do the shift.
    masm.as_srai_w(dest, tmp, shift);
  } else {
    // Division by 1 is the identity.
    masm.move32(lhs, dest);
  }
}
   1084 
void CodeGenerator::visitModI(LModI* ins) {
  // Signed 32-bit modulus with checks for modulus by zero and for -0
  // results (negative dividend with zero remainder).
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  MOZ_ASSERT(rhs != dest && lhs != dest);
  MMod* mir = ins->mir();
  Label done;

  // Prevent divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        // Wasm semantics: modulus by zero traps.
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero, ShortJump);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
        masm.bind(&nonZero);
      } else {
        // Truncated division by zero yields integer zero.
        masm.move32(rhs, dest);
        masm.ma_b(rhs, rhs, &done, Assembler::Zero, ShortJump);
      }
    } else {
      // x % 0 is NaN, which is not an int32; bail out.
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  masm.as_mod_w(dest, lhs, rhs);

  // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
  if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
    MOZ_ASSERT(mir->fallible());
    masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
    bailoutCmp32(Assembler::Signed, lhs, Imm32(0), ins->snapshot());
  }
  masm.bind(&done);
}
   1122 
   1123 void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
   1124  Register in = ToRegister(ins->input());
   1125  Register out = ToRegister(ins->output());
   1126  MMod* mir = ins->mir();
   1127  Label negative, done;
   1128 
   1129  masm.move32(in, out);
   1130  masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
   1131  // Switch based on sign of the lhs.
   1132  // Positive numbers are just a bitmask
   1133  masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
   1134  {
   1135    masm.and32(Imm32((1 << ins->shift()) - 1), out);
   1136    masm.ma_b(&done, ShortJump);
   1137  }
   1138 
   1139  // Negative numbers need a negate, bitmask, negate
   1140  {
   1141    masm.bind(&negative);
   1142    masm.neg32(out);
   1143    masm.and32(Imm32((1 << ins->shift()) - 1), out);
   1144    masm.neg32(out);
   1145  }
   1146  if (mir->canBeNegativeDividend()) {
   1147    if (!mir->isTruncated()) {
   1148      MOZ_ASSERT(mir->fallible());
   1149      bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
   1150    } else {
   1151      // -0|0 == 0
   1152    }
   1153  }
   1154  masm.bind(&done);
   1155 }
   1156 
   1157 void CodeGenerator::visitModMaskI(LModMaskI* ins) {
   1158  Register src = ToRegister(ins->input());
   1159  Register dest = ToRegister(ins->output());
   1160  Register tmp0 = ToRegister(ins->temp0());
   1161  Register tmp1 = ToRegister(ins->temp1());
   1162  MMod* mir = ins->mir();
   1163 
   1164  if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
   1165    MOZ_ASSERT(mir->fallible());
   1166 
   1167    Label bail;
   1168    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
   1169    bailoutFrom(&bail, ins->snapshot());
   1170  } else {
   1171    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
   1172  }
   1173 }
   1174 
   1175 void CodeGenerator::visitBitNotI(LBitNotI* ins) {
   1176  Register input = ToRegister(ins->input());
   1177  Register dest = ToRegister(ins->output());
   1178  masm.as_nor(dest, input, zero);
   1179 }
   1180 
   1181 void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
   1182  Register input = ToRegister64(ins->input()).reg;
   1183  Register dest = ToOutRegister64(ins).reg;
   1184  masm.as_nor(dest, input, zero);
   1185 }
   1186 
void CodeGenerator::visitBitOpI(LBitOpI* ins) {
  // 32-bit bitwise or/xor/and. After a full-width register op, slli.w by 0
  // re-canonicalizes the result as a sign-extended int32 in the 64-bit
  // register; the ma_* immediate forms handle this themselves.
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  // all of these bitops should be either imm32's, or integer registers.
  switch (ins->bitop()) {
    case JSOp::BitOr:
      if (rhs->isConstant()) {
        masm.ma_or(dest, lhs, Imm32(ToInt32(rhs)));
      } else {
        masm.as_or(dest, lhs, ToRegister(rhs));
        masm.as_slli_w(dest, dest, 0);
      }
      break;
    case JSOp::BitXor:
      if (rhs->isConstant()) {
        masm.ma_xor(dest, lhs, Imm32(ToInt32(rhs)));
      } else {
        masm.as_xor(dest, lhs, ToRegister(rhs));
        masm.as_slli_w(dest, dest, 0);
      }
      break;
    case JSOp::BitAnd:
      if (rhs->isConstant()) {
        masm.ma_and(dest, lhs, Imm32(ToInt32(rhs)));
      } else {
        masm.as_and(dest, lhs, ToRegister(rhs));
        masm.as_slli_w(dest, dest, 0);
      }
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}
   1222 
void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
  // 64-bit bitwise or/xor/and. Constant operands are materialized into a
  // scratch register first.
  Register lhs = ToRegister64(lir->lhs()).reg;
  Register rhs;
  Register dest = ToOutRegister64(lir).reg;

  UseScratchRegisterScope temps(masm);
  if (IsConstant(lir->rhs())) {
    rhs = temps.Acquire();

    // Small immediates can be handled without the load immediate instruction,
    // but this optimisation isn't yet implemented.
    masm.ma_li(rhs, ImmWord(ToInt64(lir->rhs())));
  } else {
    rhs = ToRegister64(lir->rhs()).reg;
  }

  switch (lir->bitop()) {
    case JSOp::BitOr:
      masm.as_or(dest, lhs, rhs);
      break;
    case JSOp::BitXor:
      masm.as_xor(dest, lhs, rhs);
      break;
    case JSOp::BitAnd:
      masm.as_and(dest, lhs, rhs);
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}
   1253 
void CodeGenerator::visitShiftI(LShiftI* ins) {
  // 32-bit shift; per JS semantics only the low five bits of the count are
  // used. Ursh may overflow int32 (a negative lhs shifted by 0, or any
  // result above INT32_MAX), in which case we bail out.
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.as_slli_w(dest, lhs, shift);
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.as_srai_w(dest, lhs, shift);
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.as_srli_w(dest, lhs, shift);
        } else {
          // x >>> 0 can overflow.
          if (ins->mir()->toUrsh()->fallible()) {
            // A negative lhs yields an unsigned value above INT32_MAX.
            bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
          }
          masm.move32(lhs, dest);
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    Register shift = ToRegister(rhs);
    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.as_sll_w(dest, lhs, shift);
        break;
      case JSOp::Rsh:
        masm.as_sra_w(dest, lhs, shift);
        break;
      case JSOp::Ursh:
        masm.as_srl_w(dest, lhs, shift);
        if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}
   1311 
void CodeGenerator::visitShiftIntPtr(LShiftIntPtr* ins) {
  // Pointer-sized (64-bit) shift; only the low six bits of the count apply.
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToIntPtr(rhs) & 0x3F;
    if (shift) {
      switch (ins->bitop()) {
        case JSOp::Lsh:
          masm.as_slli_d(dest, lhs, shift);
          break;
        case JSOp::Rsh:
          masm.as_srai_d(dest, lhs, shift);
          break;
        case JSOp::Ursh:
          masm.as_srli_d(dest, lhs, shift);
          break;
        default:
          MOZ_CRASH("Unexpected shift op");
      }
    } else if (lhs != dest) {
      // Shift by zero is just a move.
      masm.movePtr(lhs, dest);
    }
  } else {
    Register shift = ToRegister(rhs);
    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.as_sll_d(dest, lhs, shift);
        break;
      case JSOp::Rsh:
        masm.as_sra_d(dest, lhs, shift);
        break;
      case JSOp::Ursh:
        masm.as_srl_d(dest, lhs, shift);
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}
   1353 
   1354 void CodeGenerator::visitShiftI64(LShiftI64* lir) {
   1355  Register lhs = ToRegister64(lir->lhs()).reg;
   1356  const LAllocation* rhs = lir->rhs();
   1357  Register dest = ToOutRegister64(lir).reg;
   1358 
   1359  if (rhs->isConstant()) {
   1360    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
   1361    if (shift) {
   1362      switch (lir->bitop()) {
   1363        case JSOp::Lsh:
   1364          masm.as_slli_d(dest, lhs, shift);
   1365          break;
   1366        case JSOp::Rsh:
   1367          masm.as_srai_d(dest, lhs, shift);
   1368          break;
   1369        case JSOp::Ursh:
   1370          masm.as_srli_d(dest, lhs, shift);
   1371          break;
   1372        default:
   1373          MOZ_CRASH("Unexpected shift op");
   1374      }
   1375    } else if (lhs != dest) {
   1376      masm.movePtr(lhs, dest);
   1377    }
   1378    return;
   1379  }
   1380 
   1381  Register shift = ToRegister(rhs);
   1382  switch (lir->bitop()) {
   1383    case JSOp::Lsh:
   1384      masm.as_sll_d(dest, lhs, shift);
   1385      break;
   1386    case JSOp::Rsh:
   1387      masm.as_sra_d(dest, lhs, shift);
   1388      break;
   1389    case JSOp::Ursh:
   1390      masm.as_srl_d(dest, lhs, shift);
   1391      break;
   1392    default:
   1393      MOZ_CRASH("Unexpected shift op");
   1394  }
   1395 }
   1396 
   1397 void CodeGenerator::visitUrshD(LUrshD* ins) {
   1398  Register lhs = ToRegister(ins->lhs());
   1399  Register temp = ToRegister(ins->temp0());
   1400 
   1401  const LAllocation* rhs = ins->rhs();
   1402  FloatRegister out = ToFloatRegister(ins->output());
   1403 
   1404  if (rhs->isConstant()) {
   1405    masm.as_srli_w(temp, lhs, ToInt32(rhs) & 0x1f);
   1406  } else {
   1407    masm.as_srl_w(temp, lhs, ToRegister(rhs));
   1408  }
   1409 
   1410  masm.convertUInt32ToDouble(temp, out);
   1411 }
   1412 
void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  // Math.pow(x, 0.5) is not exactly sqrt(x): -Infinity and -0 inputs need
  // special handling per the ECMAScript pow semantics.
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());
  ScratchDoubleScope fpscratch(masm);

  Label done, skip;

  // Math.pow(-Infinity, 0.5) == Infinity.
  masm.loadConstantDouble(NegativeInfinity<double>(), fpscratch);
  masm.ma_bc_d(input, fpscratch, &skip, Assembler::DoubleNotEqualOrUnordered,
               ShortJump);
  // Negating -Infinity yields +Infinity.
  masm.as_fneg_d(output, fpscratch);
  masm.ma_b(&done, ShortJump);

  masm.bind(&skip);
  // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
  // Adding 0 converts any -0 to 0.
  masm.loadConstantDouble(0.0, fpscratch);
  masm.as_fadd_d(output, input, fpscratch);
  masm.as_fsqrt_d(output, output);

  masm.bind(&done);
}
   1436 
   1437 void CodeGenerator::visitMathD(LMathD* math) {
   1438  FloatRegister src1 = ToFloatRegister(math->lhs());
   1439  FloatRegister src2 = ToFloatRegister(math->rhs());
   1440  FloatRegister output = ToFloatRegister(math->output());
   1441 
   1442  switch (math->jsop()) {
   1443    case JSOp::Add:
   1444      masm.as_fadd_d(output, src1, src2);
   1445      break;
   1446    case JSOp::Sub:
   1447      masm.as_fsub_d(output, src1, src2);
   1448      break;
   1449    case JSOp::Mul:
   1450      masm.as_fmul_d(output, src1, src2);
   1451      break;
   1452    case JSOp::Div:
   1453      masm.as_fdiv_d(output, src1, src2);
   1454      break;
   1455    default:
   1456      MOZ_CRASH("unexpected opcode");
   1457  }
   1458 }
   1459 
   1460 void CodeGenerator::visitMathF(LMathF* math) {
   1461  FloatRegister src1 = ToFloatRegister(math->lhs());
   1462  FloatRegister src2 = ToFloatRegister(math->rhs());
   1463  FloatRegister output = ToFloatRegister(math->output());
   1464 
   1465  switch (math->jsop()) {
   1466    case JSOp::Add:
   1467      masm.as_fadd_s(output, src1, src2);
   1468      break;
   1469    case JSOp::Sub:
   1470      masm.as_fsub_s(output, src1, src2);
   1471      break;
   1472    case JSOp::Mul:
   1473      masm.as_fmul_s(output, src1, src2);
   1474      break;
   1475    case JSOp::Div:
   1476      masm.as_fdiv_s(output, src1, src2);
   1477      break;
   1478    default:
   1479      MOZ_CRASH("unexpected opcode");
   1480  }
   1481 }
   1482 
void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  // JS ToInt32 truncation of a double, via the shared helper.
  emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
                     ins->mir());
}
   1487 
void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  // JS ToInt32 truncation of a float32, via the shared helper.
  emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
                      ins->mir());
}
   1492 
void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
    LWasmBuiltinTruncateDToInt32* lir) {
  // Wasm builtin double->int32 truncation, via the shared helper.
  emitTruncateDouble(ToFloatRegister(lir->input()), ToRegister(lir->output()),
                     lir->mir());
}
   1498 
void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
    LWasmBuiltinTruncateFToInt32* lir) {
  // Wasm builtin float32->int32 truncation, via the shared helper.
  emitTruncateFloat32(ToFloatRegister(lir->input()), ToRegister(lir->output()),
                      lir->mir());
}
   1504 
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
  // Wasm float->int32 truncation. Out-of-range or NaN inputs take the
  // out-of-line path, which traps or saturates per mir->isSaturating().
  auto input = ToFloatRegister(lir->input());
  auto output = ToRegister(lir->output());

  MWasmTruncateToInt32* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  Label* oolEntry = ool->entry();
  if (mir->isUnsigned()) {
    // Unsigned destination.
    if (fromType == MIRType::Double) {
      masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
                                      oolEntry);
    } else if (fromType == MIRType::Float32) {
      masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
                                       oolEntry);
    } else {
      MOZ_CRASH("unexpected type");
    }

    masm.bind(ool->rejoin());
    return;
  }

  // Signed destination.
  if (fromType == MIRType::Double) {
    masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
                                   oolEntry);
  } else if (fromType == MIRType::Float32) {
    masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
                                    oolEntry);
  } else {
    MOZ_CRASH("unexpected type");
  }

  masm.bind(ool->rejoin());
}
   1545 
void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
  // Branch on the truthiness of a double: 0, -0 and NaN are falsey.
  FloatRegister input = ToFloatRegister(test->input());
  ScratchDoubleScope fpscratch(masm);

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantDouble(0.0, fpscratch);
  // If 0, or NaN, the result is false.
  if (isNextBlock(ifFalse->lir())) {
    // Fall through to ifFalse; only branch on the truthy case.
    branchToBlock(Assembler::DoubleFloat, input, fpscratch, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    branchToBlock(Assembler::DoubleFloat, input, fpscratch, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}
   1564 
void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
  // Branch on the truthiness of a float32: 0, -0 and NaN are falsey.
  FloatRegister input = ToFloatRegister(test->input());
  ScratchFloat32Scope fpscratch(masm);

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantFloat32(0.0f, fpscratch);
  // If 0, or NaN, the result is false.

  if (isNextBlock(ifFalse->lir())) {
    // Fall through to ifFalse; only branch on the truthy case.
    branchToBlock(Assembler::SingleFloat, input, fpscratch, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    branchToBlock(Assembler::SingleFloat, input, fpscratch, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}
   1584 
   1585 void CodeGenerator::visitCompareD(LCompareD* comp) {
   1586  FloatRegister lhs = ToFloatRegister(comp->left());
   1587  FloatRegister rhs = ToFloatRegister(comp->right());
   1588  Register dest = ToRegister(comp->output());
   1589 
   1590  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
   1591  masm.ma_cmp_set_double(dest, lhs, rhs, cond);
   1592 }
   1593 
   1594 void CodeGenerator::visitCompareF(LCompareF* comp) {
   1595  FloatRegister lhs = ToFloatRegister(comp->left());
   1596  FloatRegister rhs = ToFloatRegister(comp->right());
   1597  Register dest = ToRegister(comp->output());
   1598 
   1599  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
   1600  masm.ma_cmp_set_float32(dest, lhs, rhs, cond);
   1601 }
   1602 
void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
  // Fused double compare-and-branch.
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  if (isNextBlock(ifFalse->lir())) {
    // Fall through to ifFalse; branch only on the true condition.
    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
  } else {
    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}
   1620 
void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
  // Fused float32 compare-and-branch.
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  if (isNextBlock(ifFalse->lir())) {
    // Fall through to ifFalse; branch only on the true condition.
    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
  } else {
    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}
   1638 
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  // Unsigned 32-bit int -> double conversion (always exact).
  masm.convertUInt32ToDouble(ToRegister(lir->input()),
                             ToFloatRegister(lir->output()));
}
   1643 
void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
  // Unsigned 32-bit int -> float32 conversion (may round).
  masm.convertUInt32ToFloat32(ToRegister(lir->input()),
                              ToFloatRegister(lir->output()));
}
   1648 
void CodeGenerator::visitNotD(LNotD* ins) {
  // Since this operation is not, we want to set a bit if
  // the double is falsey, which means 0.0, -0.0 or NaN.
  FloatRegister in = ToFloatRegister(ins->input());
  Register dest = ToRegister(ins->output());
  ScratchDoubleScope fpscratch(masm);

  // DoubleEqualOrUnordered is true for 0.0/-0.0 (compare equal to 0.0)
  // and for NaN (unordered).
  masm.loadConstantDouble(0.0, fpscratch);
  masm.ma_cmp_set_double(dest, in, fpscratch,
                         Assembler::DoubleEqualOrUnordered);
}
   1660 
void CodeGenerator::visitNotF(LNotF* ins) {
  // Since this operation is not, we want to set a bit if
  // the float32 is falsey, which means 0.0, -0.0 or NaN.
  FloatRegister in = ToFloatRegister(ins->input());
  Register dest = ToRegister(ins->output());
  ScratchFloat32Scope fpscratch(masm);

  // DoubleEqualOrUnordered is true for 0.0/-0.0 (compare equal to 0.0)
  // and for NaN (unordered).
  masm.loadConstantFloat32(0.0f, fpscratch);
  masm.ma_cmp_set_float32(dest, in, fpscratch,
                          Assembler::DoubleEqualOrUnordered);
}
   1672 
// Delegate to the shared wasm load emitter.
void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
   1674 
// Delegate to the shared wasm store emitter.
void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
   1676 
void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  // asm.js heap load: a bounds-checked access relative to HeapReg. A failed
  // bounds check produces NaN (float accesses) or 0 (integer accesses)
  // rather than trapping.
  const MAsmJSLoadHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasMemoryBase());

  const LAllocation* ptr = ins->ptr();
  const LDefinition* output = ins->output();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  Register ptrReg = ToRegister(ptr);
  Scalar::Type accessType = mir->accessType();
  bool isFloat = accessType == Scalar::Float32 || accessType == Scalar::Float64;
  Label done;

  if (mir->needsBoundsCheck()) {
    Label boundsCheckPassed;
    Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
    masm.wasmBoundsCheck32(Assembler::Below, ptrReg, boundsCheckLimitReg,
                           &boundsCheckPassed);
    // Return a default value in case of a bounds-check failure.
    if (isFloat) {
      if (accessType == Scalar::Float32) {
        masm.loadConstantFloat32(GenericNaN(), ToFloatRegister(output));
      } else {
        masm.loadConstantDouble(GenericNaN(), ToFloatRegister(output));
      }
    } else {
      masm.mov(zero, ToRegister(output));
    }
    masm.jump(&done);
    masm.bind(&boundsCheckPassed);
  }

  // TODO(loong64): zero-extend index in asm.js?
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  masm.move32To64ZeroExtend(ptrReg, Register64(scratch));

  // Select the indexed load by element type; the address is
  // HeapReg + zero-extended index.
  switch (accessType) {
    case Scalar::Int8:
      masm.as_ldx_b(ToRegister(output), HeapReg, scratch);
      break;
    case Scalar::Uint8:
      masm.as_ldx_bu(ToRegister(output), HeapReg, scratch);
      break;
    case Scalar::Int16:
      masm.as_ldx_h(ToRegister(output), HeapReg, scratch);
      break;
    case Scalar::Uint16:
      masm.as_ldx_hu(ToRegister(output), HeapReg, scratch);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
      masm.as_ldx_w(ToRegister(output), HeapReg, scratch);
      break;
    case Scalar::Float64:
      masm.as_fldx_d(ToFloatRegister(output), HeapReg, scratch);
      break;
    case Scalar::Float32:
      masm.as_fldx_s(ToFloatRegister(output), HeapReg, scratch);
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  // Only bind |done| if the bounds-check path actually referenced it.
  if (done.used()) {
    masm.bind(&done);
  }
}
   1745 
void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
  // asm.js heap store.  Unlike wasm, an out-of-bounds asm.js store does not
  // trap: it is silently skipped via the branch to `done` below.
  const MAsmJSStoreHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasMemoryBase());

  const LAllocation* value = ins->value();
  const LAllocation* ptr = ins->ptr();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  Register ptrReg = ToRegister(ptr);

  Label done;
  if (mir->needsBoundsCheck()) {
    // Skip the store entirely when the index is out of bounds.
    Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
    masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg, boundsCheckLimitReg,
                           &done);
  }

  // TODO(loong64): zero-extend index in asm.js?
  // Zero-extend the 32-bit index into a scratch register so it can be used
  // as a 64-bit offset from HeapReg in the indexed store below.
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  masm.move32To64ZeroExtend(ptrReg, Register64(scratch));

  // Emit the indexed store matching the access width.
  switch (mir->accessType()) {
    case Scalar::Int8:
    case Scalar::Uint8:
      masm.as_stx_b(ToRegister(value), HeapReg, scratch);
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.as_stx_h(ToRegister(value), HeapReg, scratch);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
      masm.as_stx_w(ToRegister(value), HeapReg, scratch);
      break;
    case Scalar::Float64:
      masm.as_fstx_d(ToFloatRegister(value), HeapReg, scratch);
      break;
    case Scalar::Float32:
      masm.as_fstx_s(ToFloatRegister(value), HeapReg, scratch);
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  if (done.used()) {
    masm.bind(&done);
  }
}
   1795 
   1796 void CodeGenerator::visitWasmCompareExchangeHeap(
   1797    LWasmCompareExchangeHeap* ins) {
   1798  MWasmCompareExchangeHeap* mir = ins->mir();
   1799  Register memoryBase = ToRegister(ins->memoryBase());
   1800  Register ptrReg = ToRegister(ins->ptr());
   1801  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());
   1802 
   1803  Register oldval = ToRegister(ins->oldValue());
   1804  Register newval = ToRegister(ins->newValue());
   1805  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
   1806  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
   1807  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());
   1808 
   1809  masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
   1810                           offsetTemp, maskTemp, ToRegister(ins->output()));
   1811 }
   1812 
   1813 void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
   1814  MWasmAtomicExchangeHeap* mir = ins->mir();
   1815  Register memoryBase = ToRegister(ins->memoryBase());
   1816  Register ptrReg = ToRegister(ins->ptr());
   1817  Register value = ToRegister(ins->value());
   1818  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());
   1819 
   1820  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
   1821  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
   1822  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());
   1823 
   1824  masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
   1825                          maskTemp, ToRegister(ins->output()));
   1826 }
   1827 
   1828 void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
   1829  MOZ_ASSERT(ins->mir()->hasUses());
   1830 
   1831  MWasmAtomicBinopHeap* mir = ins->mir();
   1832  Register memoryBase = ToRegister(ins->memoryBase());
   1833  Register ptrReg = ToRegister(ins->ptr());
   1834  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
   1835  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
   1836  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());
   1837 
   1838  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());
   1839 
   1840  masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
   1841                         ToRegister(ins->value()), srcAddr, valueTemp,
   1842                         offsetTemp, maskTemp, ToRegister(ins->output()));
   1843 }
   1844 
   1845 void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
   1846    LWasmAtomicBinopHeapForEffect* ins) {
   1847  MOZ_ASSERT(!ins->mir()->hasUses());
   1848 
   1849  MWasmAtomicBinopHeap* mir = ins->mir();
   1850  Register memoryBase = ToRegister(ins->memoryBase());
   1851  Register ptrReg = ToRegister(ins->ptr());
   1852  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
   1853  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
   1854  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());
   1855 
   1856  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());
   1857  masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
   1858                          ToRegister(ins->value()), srcAddr, valueTemp,
   1859                          offsetTemp, maskTemp);
   1860 }
   1861 
   1862 void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
   1863  const MWasmStackArg* mir = ins->mir();
   1864  if (ins->arg()->isConstant()) {
   1865    masm.storePtr(ImmWord(ToInt32(ins->arg())),
   1866                  Address(StackPointer, mir->spOffset()));
   1867  } else {
   1868    if (ins->arg()->isGeneralReg()) {
   1869      masm.storePtr(ToRegister(ins->arg()),
   1870                    Address(StackPointer, mir->spOffset()));
   1871    } else if (mir->input()->type() == MIRType::Double) {
   1872      masm.storeDouble(ToFloatRegister(ins->arg()),
   1873                       Address(StackPointer, mir->spOffset()));
   1874    } else {
   1875      masm.storeFloat32(ToFloatRegister(ins->arg()),
   1876                        Address(StackPointer, mir->spOffset()));
   1877    }
   1878  }
   1879 }
   1880 
   1881 void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
   1882  const MWasmStackArg* mir = ins->mir();
   1883  Address dst(StackPointer, mir->spOffset());
   1884  if (IsConstant(ins->arg())) {
   1885    masm.store64(Imm64(ToInt64(ins->arg())), dst);
   1886  } else {
   1887    masm.store64(ToRegister64(ins->arg()), dst);
   1888  }
   1889 }
   1890 
void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
  // Wasm select: output = cond ? trueExpr : falseExpr.  The true input is
  // reused as the output register, so only the "false" case needs a
  // conditional move or load.
  MIRType mirType = ins->mir()->type();

  Register cond = ToRegister(ins->condExpr());
  const LAllocation* falseExpr = ins->falseExpr();

  if (mirType == MIRType::Int32 || mirType == MIRType::WasmAnyRef) {
    Register out = ToRegister(ins->output());
    MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
               "true expr input is reused for output");
    if (falseExpr->isGeneralReg()) {
      // Overwrite the output with the false value when cond == 0.
      masm.moveIfZero(out, ToRegister(falseExpr), cond);
    } else {
      // False value is spilled: conditionally load it when cond == 0.
      masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
    }
    return;
  }

  // Floating-point select.
  FloatRegister out = ToFloatRegister(ins->output());
  MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
             "true expr input is reused for output");

  if (falseExpr->isFloatReg()) {
    // FP conditional move: out = falseExpr when cond == 0, else unchanged.
    if (mirType == MIRType::Float32) {
      masm.ma_fmovz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr),
                    cond);
    } else if (mirType == MIRType::Double) {
      masm.ma_fmovz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr),
                    cond);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }
  } else {
    // False value is spilled: branch over the load when cond != 0.
    Label done;
    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);

    if (mirType == MIRType::Float32) {
      masm.loadFloat32(ToAddress(falseExpr), out);
    } else if (mirType == MIRType::Double) {
      masm.loadDouble(ToAddress(falseExpr), out);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }

    masm.bind(&done);
  }
}
   1938 
   1939 // We expect to handle only the case where compare is {U,}Int32 and select is
   1940 // {U,}Int32, and the "true" input is reused for the output.
   1941 void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
   1942  bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
   1943                    ins->compareType() == MCompare::Compare_UInt32;
   1944  bool selIs32bit = ins->mir()->type() == MIRType::Int32;
   1945 
   1946  MOZ_RELEASE_ASSERT(
   1947      cmpIs32bit && selIs32bit,
   1948      "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
   1949 
   1950  Register trueExprAndDest = ToRegister(ins->output());
   1951  MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
   1952             "true expr input is reused for output");
   1953 
   1954  Assembler::Condition cond = Assembler::InvertCondition(
   1955      JSOpToCondition(ins->compareType(), ins->jsop()));
   1956  const LAllocation* rhs = ins->rightExpr();
   1957  const LAllocation* falseExpr = ins->ifFalseExpr();
   1958  Register lhs = ToRegister(ins->leftExpr());
   1959 
   1960  masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
   1961                   trueExprAndDest);
   1962 }
   1963 
void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
  // Unsigned 32-bit division / modulus.  For div, the remainder is computed
  // first (needed for the "result isn't an int32" bailout below) and the
  // quotient overwrites it afterwards.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Label done;

  // Prevent divide by zero.
  if (ins->canBeDivideByZero()) {
    if (ins->mir()->isTruncated()) {
      if (ins->trapOnError()) {
        // Wasm semantics: division by zero traps.
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->trapSiteDesc());
        masm.bind(&nonZero);
      } else {
        // Infinity|0 == 0
        Label notzero;
        masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
        masm.move32(Imm32(0), output);
        masm.ma_b(&done, ShortJump);
        masm.bind(&notzero);
      }
    } else {
      // Untruncated JS semantics: x / 0 produces a double, so bail out.
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  masm.as_mod_wu(output, lhs, rhs);

  // If the remainder is > 0, bailout since this must be a double.
  if (ins->mir()->isDiv()) {
    if (!ins->mir()->toDiv()->canTruncateRemainder()) {
      bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
    }
    // Get quotient
    masm.as_div_wu(output, lhs, rhs);
  }

  if (!ins->mir()->isTruncated()) {
    // A result with the sign bit set cannot be represented as an int32, so
    // bail out and let it be recomputed as a double.
    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
  }

  masm.bind(&done);
}
   2008 
   2009 void CodeGenerator::visitEffectiveAddress3(LEffectiveAddress3* ins) {
   2010  const MEffectiveAddress3* mir = ins->mir();
   2011  Register base = ToRegister(ins->base());
   2012  Register index = ToRegister(ins->index());
   2013  Register output = ToRegister(ins->output());
   2014 
   2015  BaseIndex address(base, index, mir->scale(), mir->displacement());
   2016  masm.computeEffectiveAddress32(address, output);
   2017 }
   2018 
   2019 void CodeGenerator::visitEffectiveAddress2(LEffectiveAddress2* ins) {
   2020  const MEffectiveAddress2* mir = ins->mir();
   2021  Register index = ToRegister(ins->index());
   2022  Register output = ToRegister(ins->output());
   2023 
   2024  BaseIndex address(zero, index, mir->scale(), mir->displacement());
   2025  masm.computeEffectiveAddress32(address, output);
   2026 }
   2027 
   2028 void CodeGenerator::visitNegI(LNegI* ins) {
   2029  Register input = ToRegister(ins->input());
   2030  Register output = ToRegister(ins->output());
   2031 
   2032  masm.as_sub_w(output, zero, input);
   2033 }
   2034 
   2035 void CodeGenerator::visitNegI64(LNegI64* ins) {
   2036  Register input = ToRegister64(ins->input()).reg;
   2037  Register output = ToOutRegister64(ins).reg;
   2038 
   2039  masm.as_sub_d(output, zero, input);
   2040 }
   2041 
   2042 void CodeGenerator::visitNegD(LNegD* ins) {
   2043  FloatRegister input = ToFloatRegister(ins->input());
   2044  FloatRegister output = ToFloatRegister(ins->output());
   2045 
   2046  masm.as_fneg_d(output, input);
   2047 }
   2048 
   2049 void CodeGenerator::visitNegF(LNegF* ins) {
   2050  FloatRegister input = ToFloatRegister(ins->input());
   2051  FloatRegister output = ToFloatRegister(ins->output());
   2052 
   2053  masm.as_fneg_s(output, input);
   2054 }
   2055 
   2056 void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
   2057  MWasmAddOffset* mir = lir->mir();
   2058  Register base = ToRegister(lir->base());
   2059  Register out = ToRegister(lir->output());
   2060 
   2061  Label ok;
   2062  masm.ma_add32TestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
   2063                         &ok);
   2064  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
   2065  masm.bind(&ok);
   2066 }
   2067 
   2068 void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
   2069  MWasmAddOffset* mir = lir->mir();
   2070  Register64 base = ToRegister64(lir->base());
   2071  Register64 out = ToOutRegister64(lir);
   2072 
   2073  Label ok;
   2074  masm.ma_addPtrTestCarry(Assembler::CarryClear, out.reg, base.reg,
   2075                          ImmWord(mir->offset()), &ok);
   2076  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
   2077  masm.bind(&ok);
   2078 }
   2079 
   2080 void CodeGenerator::visitAtomicTypedArrayElementBinop(
   2081    LAtomicTypedArrayElementBinop* lir) {
   2082  MOZ_ASSERT(!lir->mir()->isForEffect());
   2083 
   2084  AnyRegister output = ToAnyRegister(lir->output());
   2085  Register elements = ToRegister(lir->elements());
   2086  Register outTemp = ToTempRegisterOrInvalid(lir->temp0());
   2087  Register valueTemp = ToTempRegisterOrInvalid(lir->temp1());
   2088  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp2());
   2089  Register maskTemp = ToTempRegisterOrInvalid(lir->temp3());
   2090  Register value = ToRegister(lir->value());
   2091  Scalar::Type arrayType = lir->mir()->arrayType();
   2092 
   2093  auto mem = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   2094 
   2095  mem.match([&](const auto& mem) {
   2096    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
   2097                         lir->mir()->operation(), value, mem, valueTemp,
   2098                         offsetTemp, maskTemp, outTemp, output);
   2099  });
   2100 }
   2101 
   2102 void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
   2103    LAtomicTypedArrayElementBinopForEffect* lir) {
   2104  MOZ_ASSERT(lir->mir()->isForEffect());
   2105 
   2106  Register elements = ToRegister(lir->elements());
   2107  Register valueTemp = ToTempRegisterOrInvalid(lir->temp0());
   2108  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp1());
   2109  Register maskTemp = ToTempRegisterOrInvalid(lir->temp2());
   2110  Register value = ToRegister(lir->value());
   2111  Scalar::Type arrayType = lir->mir()->arrayType();
   2112 
   2113  auto mem = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   2114 
   2115  mem.match([&](const auto& mem) {
   2116    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
   2117                          lir->mir()->operation(), value, mem, valueTemp,
   2118                          offsetTemp, maskTemp);
   2119  });
   2120 }
   2121 
   2122 void CodeGenerator::visitCompareExchangeTypedArrayElement(
   2123    LCompareExchangeTypedArrayElement* lir) {
   2124  Register elements = ToRegister(lir->elements());
   2125  AnyRegister output = ToAnyRegister(lir->output());
   2126  Register outTemp = ToTempRegisterOrInvalid(lir->temp0());
   2127 
   2128  Register oldval = ToRegister(lir->oldval());
   2129  Register newval = ToRegister(lir->newval());
   2130  Register valueTemp = ToTempRegisterOrInvalid(lir->temp1());
   2131  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp2());
   2132  Register maskTemp = ToTempRegisterOrInvalid(lir->temp3());
   2133  Scalar::Type arrayType = lir->mir()->arrayType();
   2134 
   2135  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   2136 
   2137  dest.match([&](const auto& dest) {
   2138    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
   2139                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
   2140                           output);
   2141  });
   2142 }
   2143 
   2144 void CodeGenerator::visitAtomicExchangeTypedArrayElement(
   2145    LAtomicExchangeTypedArrayElement* lir) {
   2146  Register elements = ToRegister(lir->elements());
   2147  AnyRegister output = ToAnyRegister(lir->output());
   2148  Register outTemp = ToTempRegisterOrInvalid(lir->temp0());
   2149 
   2150  Register value = ToRegister(lir->value());
   2151  Register valueTemp = ToTempRegisterOrInvalid(lir->temp1());
   2152  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp2());
   2153  Register maskTemp = ToTempRegisterOrInvalid(lir->temp3());
   2154  Scalar::Type arrayType = lir->mir()->arrayType();
   2155 
   2156  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   2157 
   2158  dest.match([&](const auto& dest) {
   2159    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
   2160                          valueTemp, offsetTemp, maskTemp, outTemp, output);
   2161  });
   2162 }
   2163 
   2164 void CodeGenerator::visitCompareExchangeTypedArrayElement64(
   2165    LCompareExchangeTypedArrayElement64* lir) {
   2166  Register elements = ToRegister(lir->elements());
   2167  Register64 oldval = ToRegister64(lir->oldval());
   2168  Register64 newval = ToRegister64(lir->newval());
   2169  Register64 out = ToOutRegister64(lir);
   2170  Scalar::Type arrayType = lir->mir()->arrayType();
   2171 
   2172  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   2173 
   2174  dest.match([&](const auto& dest) {
   2175    masm.compareExchange64(Synchronization::Full(), dest, oldval, newval, out);
   2176  });
   2177 }
   2178 
   2179 void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
   2180    LAtomicExchangeTypedArrayElement64* lir) {
   2181  Register elements = ToRegister(lir->elements());
   2182  Register64 value = ToRegister64(lir->value());
   2183  Register64 out = ToOutRegister64(lir);
   2184  Scalar::Type arrayType = lir->mir()->arrayType();
   2185 
   2186  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   2187 
   2188  dest.match([&](const auto& dest) {
   2189    masm.atomicExchange64(Synchronization::Full(), dest, value, out);
   2190  });
   2191 }
   2192 
   2193 void CodeGenerator::visitAtomicTypedArrayElementBinop64(
   2194    LAtomicTypedArrayElementBinop64* lir) {
   2195  MOZ_ASSERT(lir->mir()->hasUses());
   2196 
   2197  Register elements = ToRegister(lir->elements());
   2198  Register64 value = ToRegister64(lir->value());
   2199  Register64 temp = ToRegister64(lir->temp0());
   2200  Register64 out = ToOutRegister64(lir);
   2201 
   2202  Scalar::Type arrayType = lir->mir()->arrayType();
   2203  AtomicOp atomicOp = lir->mir()->operation();
   2204 
   2205  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   2206 
   2207  dest.match([&](const auto& dest) {
   2208    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, value, dest, temp,
   2209                         out);
   2210  });
   2211 }
   2212 
   2213 void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
   2214    LAtomicTypedArrayElementBinopForEffect64* lir) {
   2215  MOZ_ASSERT(!lir->mir()->hasUses());
   2216 
   2217  Register elements = ToRegister(lir->elements());
   2218  Register64 value = ToRegister64(lir->value());
   2219  Register64 temp = ToRegister64(lir->temp0());
   2220 
   2221  Scalar::Type arrayType = lir->mir()->arrayType();
   2222  AtomicOp atomicOp = lir->mir()->operation();
   2223 
   2224  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   2225 
   2226  dest.match([&](const auto& dest) {
   2227    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, value, dest, temp);
   2228  });
   2229 }
   2230 
   2231 void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
   2232  Register elements = ToRegister(lir->elements());
   2233  Register64 out = ToOutRegister64(lir);
   2234 
   2235  Scalar::Type storageType = lir->mir()->storageType();
   2236 
   2237  auto source = ToAddressOrBaseIndex(elements, lir->index(), storageType);
   2238 
   2239  auto sync = Synchronization::Load();
   2240  masm.memoryBarrierBefore(sync);
   2241  source.match([&](const auto& source) { masm.load64(source, out); });
   2242  masm.memoryBarrierAfter(sync);
   2243 }
   2244 
   2245 void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
   2246  Register elements = ToRegister(lir->elements());
   2247  Register64 value = ToRegister64(lir->value());
   2248 
   2249  Scalar::Type writeType = lir->mir()->writeType();
   2250 
   2251  auto dest = ToAddressOrBaseIndex(elements, lir->index(), writeType);
   2252 
   2253  auto sync = Synchronization::Store();
   2254  masm.memoryBarrierBefore(sync);
   2255  dest.match([&](const auto& dest) { masm.store64(value, dest); });
   2256  masm.memoryBarrierAfter(sync);
   2257 }
   2258 
   2259 void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
   2260  Register memoryBase = ToRegister(lir->memoryBase());
   2261  Register ptr = ToRegister(lir->ptr());
   2262  Register64 oldValue = ToRegister64(lir->oldValue());
   2263  Register64 newValue = ToRegister64(lir->newValue());
   2264  Register64 output = ToOutRegister64(lir);
   2265  uint32_t offset = lir->mir()->access().offset32();
   2266 
   2267  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
   2268  masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
   2269                             output);
   2270 }
   2271 
   2272 void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
   2273  Register memoryBase = ToRegister(lir->memoryBase());
   2274  Register ptr = ToRegister(lir->ptr());
   2275  Register64 value = ToRegister64(lir->value());
   2276  Register64 output = ToOutRegister64(lir);
   2277  uint32_t offset = lir->mir()->access().offset32();
   2278 
   2279  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
   2280  masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
   2281 }
   2282 
   2283 void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
   2284  Register memoryBase = ToRegister(lir->memoryBase());
   2285  Register ptr = ToRegister(lir->ptr());
   2286  Register64 value = ToRegister64(lir->value());
   2287  Register64 output = ToOutRegister64(lir);
   2288  Register64 temp = ToRegister64(lir->temp0());
   2289  uint32_t offset = lir->mir()->access().offset32();
   2290 
   2291  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
   2292 
   2293  masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
   2294                           addr, temp, output);
   2295 }
   2296 
// SIMD is not implemented in this LoongArch64 backend: none of the following
// SIMD LIR nodes should ever be generated for this target, so every visitor
// crashes unconditionally if reached.
void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }

void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128WithConstant(
    LWasmBinarySimd128WithConstant* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmVariableShiftSimd128(
    LWasmVariableShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmConstantShiftSimd128(
    LWasmConstantShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmSignReplicationSimd128(
    LWasmSignReplicationSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
    LWasmReplaceInt64LaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceAndBranchSimd128(
    LWasmReduceAndBranchSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128ToInt64(
    LWasmReduceSimd128ToInt64* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}