tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

CodeGenerator-riscv64.cpp (79449B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/riscv64/CodeGenerator-riscv64.h"
      8 
      9 #include "mozilla/MathAlgorithms.h"
     10 
     11 #include "jsnum.h"
     12 
     13 #include "jit/CodeGenerator.h"
     14 #include "jit/InlineScriptTree.h"
     15 #include "jit/JitRuntime.h"
     16 #include "jit/MIR-wasm.h"
     17 #include "jit/MIR.h"
     18 #include "jit/MIRGraph.h"
     19 #include "vm/JSContext.h"
     20 #include "vm/Realm.h"
     21 #include "vm/Shape.h"
     22 
     23 #include "jit/shared/CodeGenerator-shared-inl.h"
     24 #include "vm/JSScript-inl.h"
     25 
     26 using namespace js;
     27 using namespace js::jit;
     28 
     29 using JS::GenericNaN;
     30 using mozilla::NegativeInfinity;
     31 
     32 // shared
// Constructor: forwards everything to the architecture-independent shared
// code generator; no riscv64-specific state is set up here.
CodeGeneratorRiscv64::CodeGeneratorRiscv64(
    MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm,
    const wasm::CodeMetadata* wasmCodeMeta)
    : CodeGeneratorShared(gen, graph, masm, wasmCodeMeta) {}
     37 
     38 Operand CodeGeneratorRiscv64::ToOperand(const LAllocation& a) {
     39  if (a.isGeneralReg()) {
     40    return Operand(a.toGeneralReg()->reg());
     41  }
     42  if (a.isFloatReg()) {
     43    return Operand(a.toFloatReg()->reg());
     44  }
     45  return Operand(ToAddress(a));
     46 }
     47 
// Pointer-taking convenience overload; delegates to the reference version.
Operand CodeGeneratorRiscv64::ToOperand(const LAllocation* a) {
  return ToOperand(*a);
}
     51 
// Definition overload: converts the definition's output allocation.
Operand CodeGeneratorRiscv64::ToOperand(const LDefinition* def) {
  return ToOperand(def->output());
}
     55 
     56 void CodeGeneratorRiscv64::branchToBlock(FloatFormat fmt, FloatRegister lhs,
     57                                         FloatRegister rhs, MBasicBlock* mir,
     58                                         Assembler::DoubleCondition cond) {
     59  // Skip past trivial blocks.
     60  Label* label = skipTrivialBlocks(mir)->lir()->label();
     61  if (fmt == DoubleFloat) {
     62    masm.branchDouble(cond, lhs, rhs, label);
     63  } else {
     64    masm.branchFloat(cond, lhs, rhs, label);
     65  }
     66 }
     67 
     68 MoveOperand CodeGeneratorRiscv64::toMoveOperand(LAllocation a) const {
     69  if (a.isGeneralReg()) {
     70    return MoveOperand(ToRegister(a));
     71  }
     72  if (a.isFloatReg()) {
     73    return MoveOperand(ToFloatRegister(a));
     74  }
     75  MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
     76                                           : MoveOperand::Kind::Memory;
     77  Address address = ToAddress(a);
     78  MOZ_ASSERT((address.offset & 3) == 0);
     79 
     80  return MoveOperand(address, kind);
     81 }
     82 
// Route |label| to an out-of-line path that pushes this snapshot's offset
// and jumps to the shared deoptimization label (bound in
// generateOutOfLineCode).
void CodeGeneratorRiscv64::bailoutFrom(Label* label, LSnapshot* snapshot) {
  // The label must have pending uses and must not yet be bound, otherwise
  // retargeting it below would be meaningless. Skip the checks on OOM.
  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  // Serialize the snapshot now so snapshotOffset() is valid in the lambda.
  encode(snapshot);

  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    // Push snapshotOffset and make sure stack is aligned.
    masm.subPtr(Imm32(sizeof(Value)), StackPointer);
    masm.storePtr(ImmWord(snapshot->snapshotOffset()),
                  Address(StackPointer, 0));

    masm.jump(&deoptLabel_);
  });
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  // Point the still-unbound label at the out-of-line entry.
  masm.retarget(label, ool->entry());
}
    103 
    104 void CodeGeneratorRiscv64::bailout(LSnapshot* snapshot) {
    105  Label label;
    106  masm.jump(&label);
    107  bailoutFrom(&label, snapshot);
    108 }
    109 
// Emit all shared out-of-line code, then — if any bailout path used it —
// bind the common deoptimization label that bailoutFrom() paths jump to.
bool CodeGeneratorRiscv64::generateOutOfLineCode() {
  if (!CodeGeneratorShared::generateOutOfLineCode()) {
    return false;
  }

  if (deoptLabel_.used()) {
    // All non-table-based bailouts will go here.
    masm.bind(&deoptLabel_);

    // Push the frame size, so the handler can recover the IonScript.
    // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk
    // We have to use 'ra' because generateBailoutTable will implicitly do
    // the same.
    masm.move32(Imm32(frameSize()), ra);

    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
    masm.jump(handler);
  }

  // Report failure if the assembler ran out of memory while emitting.
  return !masm.oom();
}
    131 
// Out-of-line jump table for an MTableSwitch: owns the CodeLabel that marks
// the start of the table of case addresses emitted after the main code
// (see visitOutOfLineTableSwitch).
class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorRiscv64> {
  MTableSwitch* mir_;
  CodeLabel jumpLabel_;  // Bound at the start of the emitted jump table.

  void accept(CodeGeneratorRiscv64* codegen) {
    codegen->visitOutOfLineTableSwitch(this);
  }

 public:
  explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}

  MTableSwitch* mir() const { return mir_; }

  CodeLabel* jumpLabel() { return &jumpLabel_; }
};
    148 
// Emit the table-switch dispatch: bias the index by the low bound, bounds
// check against the default case, then jump indirectly through the
// out-of-line jump table built by visitOutOfLineTableSwitch.
void CodeGeneratorRiscv64::emitTableSwitchDispatch(MTableSwitch* mir,
                                                   Register index,
                                                   Register base) {
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  // Lower value with low value
  if (mir->low() != 0) {
    masm.subPtr(Imm32(mir->low()), index);
  }

  // Jump to default case if input is out of range
  int32_t cases = mir->numCases();
  masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);

  // To fill in the CodeLabels for the case entries, we need to first
  // generate the case entries (we don't yet know their offsets in the
  // instruction stream).
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
  addOutOfLineCode(ool, mir);

  // Compute the position where a pointer to the right case stands.
  masm.ma_li(base, ool->jumpLabel());

  // Table entries are pointer-sized, so scale the index accordingly.
  BaseIndex pointer(base, index, ScalePointer);

  // Jump to the right case
  masm.branchToComputedAddress(pointer);
}
    177 
// Shared implementation for wasm load LIR nodes (templated over the LIR
// type). Normalizes a 32-bit index to a zero-extended 64-bit pointer before
// delegating to MacroAssembler::wasmLoad.
template <typename T>
void CodeGeneratorRiscv64::emitWasmLoad(T* lir) {
  const MWasmLoad* mir = lir->mir();
  UseScratchRegisterScope temps(&masm);
  Register scratch2 = temps.Acquire();

  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptr = ToRegister(lir->ptr());
  Register ptrScratch = ToTempRegisterOrInvalid(lir->temp0());

  if (mir->base()->type() == MIRType::Int32) {
    // Zero-extend the 32-bit index into the scratch register and use that
    // (clobberable) copy as both the pointer and, if one was requested,
    // the pointer scratch.
    masm.move32To64ZeroExtend(ptr, Register64(scratch2));
    ptr = scratch2;
    ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
  }

  // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
  // true 64-bit value.
  masm.wasmLoad(mir->access(), memoryBase, ptr, ptrScratch,
                ToAnyRegister(lir->output()));
}
    199 
// Shared implementation for wasm store LIR nodes (templated over the LIR
// type). Mirrors emitWasmLoad: normalizes a 32-bit index to a zero-extended
// 64-bit pointer before delegating to MacroAssembler::wasmStore.
template <typename T>
void CodeGeneratorRiscv64::emitWasmStore(T* lir) {
  const MWasmStore* mir = lir->mir();
  UseScratchRegisterScope temps(&masm);
  Register scratch2 = temps.Acquire();

  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptr = ToRegister(lir->ptr());
  Register ptrScratch = ToTempRegisterOrInvalid(lir->temp0());

  if (mir->base()->type() == MIRType::Int32) {
    // Zero-extend the 32-bit index into the scratch register and use that
    // (clobberable) copy as both the pointer and, if one was requested,
    // the pointer scratch.
    masm.move32To64ZeroExtend(ptr, Register64(scratch2));
    ptr = scratch2;
    ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
  }

  // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
  // true 64-bit value.
  masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), memoryBase, ptr,
                 ptrScratch);
}
    221 
// Emit the invalidation epilogue: padding so OsiPoint patching cannot
// overwrite it, then push the return address and a patchable IonScript
// pointer before jumping to the invalidation thunk.
void CodeGeneratorRiscv64::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint
  // patching to occur. Otherwise, we could overwrite the invalidation
  // epilogue
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }

  masm.bind(&invalidate_);

  // Push the return address of the point that we bailed out at to the stack
  masm.Push(ra);

  // Push the Ion script onto the stack (when we determine what that
  // pointer is).
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();

  masm.jump(thunk);
}
    244 
// Emit the table-switch jump table: a pointer-aligned array of absolute
// case addresses, each recorded as a CodeLabel so it can be patched once
// final code addresses are known.
void CodeGeneratorRiscv64::visitOutOfLineTableSwitch(
    OutOfLineTableSwitch* ool) {
  MTableSwitch* mir = ool->mir();
  masm.nop();
  // Align so every table entry is a naturally-aligned pointer.
  masm.haltingAlign(sizeof(void*));
  masm.bind(ool->jumpLabel());
  masm.addCodeLabel(*ool->jumpLabel());
  // Keep the assembler from emitting a trampoline pool in the middle of
  // the table, which would corrupt the entry layout.
  BlockTrampolinePoolScope block_trampoline_pool(
      &masm, mir->numCases() * sizeof(uint64_t));
  for (size_t i = 0; i < mir->numCases(); i++) {
    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
    Label* caseheader = caseblock->label();
    uint32_t caseoffset = caseheader->offset();

    // The entries of the jump table need to be absolute addresses and thus
    // must be patched after codegen is finished.
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    cl.target()->bind(caseoffset);
    masm.addCodeLabel(cl);
  }
}
    267 
// Out-of-line path for non-saturating wasm float->int truncations: emits
// the range check that raises the appropriate wasm trap. Dispatches on the
// (fromType, toType) pair to the matching MacroAssembler helper.
void CodeGeneratorRiscv64::visitOutOfLineWasmTruncateCheck(
    OutOfLineWasmTruncateCheck* ool) {
  MOZ_ASSERT(!ool->isSaturating(),
             "saturating case doesn't require an OOL path");

  FloatRegister input = ool->input();
  Register output = ool->output();
  Register64 output64 = ool->output64();
  MIRType fromType = ool->fromType();
  MIRType toType = ool->toType();
  Label* oolRejoin = ool->rejoin();
  TruncFlags flags = ool->flags();
  wasm::TrapSiteDesc off = ool->trapSiteDesc();

  if (fromType == MIRType::Float32) {
    if (toType == MIRType::Int32) {
      masm.oolWasmTruncateCheckF32ToI32(input, output, flags, off, oolRejoin);
    } else if (toType == MIRType::Int64) {
      masm.oolWasmTruncateCheckF32ToI64(input, output64, flags, off, oolRejoin);
    } else {
      MOZ_CRASH("unexpected type");
    }
  } else if (fromType == MIRType::Double) {
    if (toType == MIRType::Int32) {
      masm.oolWasmTruncateCheckF64ToI32(input, output, flags, off, oolRejoin);
    } else if (toType == MIRType::Int64) {
      masm.oolWasmTruncateCheckF64ToI64(input, output64, flags, off, oolRejoin);
    } else {
      MOZ_CRASH("unexpected type");
    }
  } else {
    MOZ_CRASH("unexpected type");
  }

  // The OOL path is only used to execute the correct trap.
  MOZ_ASSERT(!oolRejoin->bound(), "ool path doesn't return");
}
    305 
// Box a typed payload into a Value register by tagging it with the static
// type recorded on the LBox.
void CodeGenerator::visitBox(LBox* box) {
  const LAllocation* in = box->payload();
  ValueOperand result = ToOutValue(box);

  masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
}
    312 
// Unbox a Value into a typed register. Three cases: a fallible unbox (tag
// check plus bailout), an infallible unbox from a register, and an
// infallible unbox straight from memory.
void CodeGenerator::visitUnbox(LUnbox* unbox) {
  MUnbox* mir = unbox->mir();

  Register result = ToRegister(unbox->output());

  if (mir->fallible()) {
    // The input may not hold the expected type: each fallibleUnbox* checks
    // the tag and branches to |bail| on a mismatch.
    ValueOperand value = ToValue(unbox->input());
    Label bail;
    switch (mir->type()) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(value, result, &bail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(value, result, &bail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(value, result, &bail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(value, result, &bail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(value, result, &bail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(value, result, &bail);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    bailoutFrom(&bail, unbox->snapshot());
    return;
  }

  // Infallible case with the boxed value in a register: just strip the tag.
  LAllocation* input = unbox->getOperand(LUnbox::Input);
  if (input->isGeneralReg()) {
    Register inputReg = ToRegister(input);
    switch (mir->type()) {
      case MIRType::Int32:
        masm.unboxInt32(inputReg, result);
        break;
      case MIRType::Boolean:
        masm.unboxBoolean(inputReg, result);
        break;
      case MIRType::Object:
        masm.unboxObject(inputReg, result);
        break;
      case MIRType::String:
        masm.unboxString(inputReg, result);
        break;
      case MIRType::Symbol:
        masm.unboxSymbol(inputReg, result);
        break;
      case MIRType::BigInt:
        masm.unboxBigInt(inputReg, result);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    return;
  }

  // Infallible case with the boxed value in memory: unbox directly from
  // its address.
  Address inputAddr = ToAddress(input);
  switch (mir->type()) {
    case MIRType::Int32:
      masm.unboxInt32(inputAddr, result);
      break;
    case MIRType::Boolean:
      masm.unboxBoolean(inputAddr, result);
      break;
    case MIRType::Object:
      masm.unboxObject(inputAddr, result);
      break;
    case MIRType::String:
      masm.unboxString(inputAddr, result);
      break;
    case MIRType::Symbol:
      masm.unboxSymbol(inputAddr, result);
      break;
    case MIRType::BigInt:
      masm.unboxBigInt(inputAddr, result);
      break;
    default:
      MOZ_CRASH("Given MIRType cannot be unboxed.");
  }
}
    399 
// Pointer-width signed division for BigInt digits. No divide-by-zero or
// overflow guard is emitted here — presumably the shared caller emits the
// necessary checks; TODO confirm against the shared CodeGenerator.
void CodeGeneratorRiscv64::emitBigIntPtrDiv(LBigIntPtrDiv* ins,
                                            Register dividend, Register divisor,
                                            Register output) {
  masm.ma_div64(output, dividend, divisor);
}
    405 
// Pointer-width signed remainder for BigInt digits. As with
// emitBigIntPtrDiv, no guards are emitted here — presumably handled by the
// shared caller; TODO confirm.
void CodeGeneratorRiscv64::emitBigIntPtrMod(LBigIntPtrMod* ins,
                                            Register dividend, Register divisor,
                                            Register output) {
  masm.ma_mod64(output, dividend, divisor);
}
    411 
// Emit a wasm IntegerDivideByZero trap when |rhs| is zero. Code is emitted
// only when the MIR node says a zero divisor is possible; the node must be
// in trap-on-error mode.
template <class LIR>
static void TrapIfDivideByZero(MacroAssembler& masm, LIR* lir, Register rhs) {
  auto* mir = lir->mir();
  MOZ_ASSERT(mir->trapOnError());

  if (mir->canBeDivideByZero()) {
    Label nonZero;
    // Branch over the trap when rhs != 0; fall through to trap on zero.
    masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero, ShortJump);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
    masm.bind(&nonZero);
  }
}
    424 
// Signed 64-bit wasm division. Traps on a zero divisor and on the single
// overflowing case, INT64_MIN / -1.
void CodeGenerator::visitDivI64(LDivI64* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());
  Register output = ToRegister(lir->output());

  MDiv* div = lir->mir();

  // Handle divide by zero.
  TrapIfDivideByZero(masm, lir, rhs);

  // Handle an integer overflow exception from INT64_MIN / -1.
  if (div->canBeNegativeOverflow()) {
    Label notOverflow;
    masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
    masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
    masm.wasmTrap(wasm::Trap::IntegerOverflow, div->trapSiteDesc());
    masm.bind(&notOverflow);
  }

  masm.ma_div64(output, lhs, rhs);
}
    446 
// Signed 64-bit wasm remainder. Only a divide-by-zero trap is needed; the
// INT64_MIN % -1 case yields 0 (see table below), so no overflow check.
void CodeGenerator::visitModI64(LModI64* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());
  Register output = ToRegister(lir->output());

  // rem result table:
  // --------------------------------
  // | Dividend  | Divisor | Result |
  // |------------------------------|
  // |    X      |    0    |   X    |
  // | INT64_MIN |   -1    |   0    |
  // --------------------------------
  //
  // NOTE: INT64_MIN % -1 returns 0, which is the expected result.

  // Handle divide by zero.
  TrapIfDivideByZero(masm, lir, rhs);

  masm.ma_mod64(output, lhs, rhs);
}
    467 
    468 void CodeGenerator::visitUDivI64(LUDivI64* lir) {
    469  Register lhs = ToRegister(lir->lhs());
    470  Register rhs = ToRegister(lir->rhs());
    471  Register output = ToRegister(lir->output());
    472 
    473  // Prevent divide by zero.
    474  TrapIfDivideByZero(masm, lir, rhs);
    475 
    476  masm.ma_divu64(output, lhs, rhs);
    477 }
    478 
    479 void CodeGenerator::visitUModI64(LUModI64* lir) {
    480  Register lhs = ToRegister(lir->lhs());
    481  Register rhs = ToRegister(lir->rhs());
    482  Register output = ToRegister(lir->output());
    483 
    484  // Prevent divide by zero.
    485  TrapIfDivideByZero(masm, lir, rhs);
    486 
    487  masm.ma_modu64(output, lhs, rhs);
    488 }
    489 
// 64-bit wasm load: zero-extend a 32-bit index in place, then delegate to
// MacroAssembler::wasmLoadI64.
void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
  const MWasmLoad* mir = lir->mir();

  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptrScratch = ToTempRegisterOrInvalid(lir->temp0());

  Register ptrReg = ToRegister(lir->ptr());
  if (mir->base()->type() == MIRType::Int32) {
    // See comment in visitWasmLoad re the type of 'base'.
    // Note: this clobbers ptrReg in place with its zero-extended value.
    masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
  }

  masm.wasmLoadI64(mir->access(), memoryBase, ptrReg, ptrScratch,
                   ToOutRegister64(lir));
}
    505 
// 64-bit wasm store: zero-extend a 32-bit index in place, then delegate to
// MacroAssembler::wasmStoreI64.
void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
  const MWasmStore* mir = lir->mir();

  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptrScratch = ToTempRegisterOrInvalid(lir->temp0());

  Register ptrReg = ToRegister(lir->ptr());
  if (mir->base()->type() == MIRType::Int32) {
    // See comment in visitWasmLoad re the type of 'base'.
    // Note: this clobbers ptrReg in place with its zero-extended value.
    masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
  }

  masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), memoryBase,
                    ptrReg, ptrScratch);
}
    521 
// Wasm select on Int64: the output register is pre-loaded with the true
// value (register allocation reuses it), so only the cond==0 case needs a
// conditional replacement with the false value.
void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

  Register cond = ToRegister(lir->condExpr());
  LInt64Allocation falseExpr = lir->falseExpr();

  Register64 out = ToOutRegister64(lir);
  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
             "true expr is reused for input");

  if (falseExpr.value().isGeneralReg()) {
    masm.moveIfZero(out.reg, ToRegister(falseExpr.value()), cond);
  } else {
    // False value lives in memory: branch over the load when cond != 0.
    Label done;
    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
    masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
    masm.bind(&done);
  }
}
    541 
    542 void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
    543  const LAllocation* input = lir->input();
    544  Register output = ToRegister(lir->output());
    545 
    546  if (lir->mir()->isUnsigned()) {
    547    masm.move32To64ZeroExtend(ToRegister(input), Register64(output));
    548  } else {
    549    masm.SignExtendWord(output, ToRegister(input));
    550  }
    551 }
    552 
// Truncate an Int64 to the low 32 bits. Only the bottom-half form is
// implemented; the upper-half form crashes as unimplemented.
void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
  LInt64Allocation input = lir->input();
  Register output = ToRegister(lir->output());

  if (lir->mir()->bottomHalf()) {
    if (input.value().isMemory()) {
      // Spilled input: a 32-bit load reads exactly the low word.
      masm.load32(ToAddress(input), output);
    } else {
      masm.move64To32(ToRegister64(input), output);
    }
  } else {
    MOZ_CRASH("Not implemented.");
  }
}
    567 
// Sign-extend the low 8, 16, or 32 bits of a 64-bit value across the full
// register width, per the node's extension mode.
void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
  Register64 input = ToRegister64(lir->input());
  Register64 output = ToOutRegister64(lir);
  switch (lir->mir()->mode()) {
    case MSignExtendInt64::Byte:
      // First move/sign-extend the low word, then sign-extend the byte.
      masm.move32To64SignExtend(input.reg, output);
      masm.move8SignExtend(output.reg, output.reg);
      break;
    case MSignExtendInt64::Half:
      masm.move32To64SignExtend(input.reg, output);
      masm.move16SignExtend(output.reg, output.reg);
      break;
    case MSignExtendInt64::Word:
      masm.move32To64SignExtend(input.reg, output);
      break;
  }
}
    585 
    586 void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
    587  Register input = ToRegister(lir->input());
    588  Register output = ToRegister(lir->output());
    589  MOZ_ASSERT(input == output);
    590  masm.move32To64ZeroExtend(input, Register64(output));
    591 }
    592 
    593 void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
    594  Register input = ToRegister(lir->input());
    595  Register output = ToRegister(lir->output());
    596  MOZ_ASSERT(input == output);
    597  masm.move64To32(Register64(input), output);
    598 }
    599 
// Truncate a float/double to Int64 for wasm. Saturating truncations are
// handled entirely inline; non-saturating ones get an out-of-line check
// that raises the proper trap on out-of-range inputs.
void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register64 output = ToOutRegister64(lir);

  MWasmTruncateToInt64* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

  bool isSaturating = mir->isSaturating();

  // RISCV saturating instructions don't require an OOL path.
  OutOfLineWasmTruncateCheck* ool = nullptr;
  Label* oolEntry = nullptr;
  Label* oolRejoin = nullptr;
  if (!isSaturating) {
    ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
    addOutOfLineCode(ool, mir);

    oolEntry = ool->entry();
    oolRejoin = ool->rejoin();
  }

  // Dispatch on source type and signedness.
  if (fromType == MIRType::Double) {
    if (mir->isUnsigned()) {
      masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
                                      oolRejoin, InvalidFloatReg);
    } else {
      masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
                                     oolRejoin, InvalidFloatReg);
    }
  } else {
    if (mir->isUnsigned()) {
      masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
                                       oolRejoin, InvalidFloatReg);
    } else {
      masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
                                      oolRejoin, InvalidFloatReg);
    }
  }

  // RISCV can handle all success case. The OOL path is only used to execute
  // the correct trap.
  MOZ_ASSERT(!ool || !ool->rejoin()->bound(), "ool path doesn't return");
}
    645 
    646 void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
    647  Register64 input = ToRegister64(lir->input());
    648  FloatRegister output = ToFloatRegister(lir->output());
    649 
    650  MIRType outputType = lir->mir()->type();
    651  MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
    652 
    653  if (outputType == MIRType::Double) {
    654    if (lir->mir()->isUnsigned()) {
    655      masm.convertUInt64ToDouble(input, output, Register::Invalid());
    656    } else {
    657      masm.convertInt64ToDouble(input, output);
    658    }
    659  } else {
    660    if (lir->mir()->isUnsigned()) {
    661      masm.convertUInt64ToFloat32(input, output, Register::Invalid());
    662    } else {
    663      masm.convertInt64ToFloat32(input, output);
    664    }
    665  }
    666 }
    667 
    668 void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
    669  FloatRegister first = ToFloatRegister(ins->first());
    670  FloatRegister second = ToFloatRegister(ins->second());
    671 
    672  MOZ_ASSERT(first == ToFloatRegister(ins->output()));
    673 
    674  if (ins->mir()->isMax()) {
    675    masm.maxDouble(second, first, true);
    676  } else {
    677    masm.minDouble(second, first, true);
    678  }
    679 }
    680 
    681 void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
    682  FloatRegister first = ToFloatRegister(ins->first());
    683  FloatRegister second = ToFloatRegister(ins->second());
    684 
    685  MOZ_ASSERT(first == ToFloatRegister(ins->output()));
    686 
    687  if (ins->mir()->isMax()) {
    688    masm.maxFloat32(second, first, true);
    689  } else {
    690    masm.minFloat32(second, first, true);
    691  }
    692 }
    693 
// 32-bit integer addition. When the instruction carries a snapshot, emit
// the overflow-checking form and bail out to it on overflow.
void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  const LDefinition* dest = ins->output();

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we don't need to check for overflow
  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_add32(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.addw(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              ToRegister(rhs), &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}
    722 
    723 void CodeGenerator::visitAddIntPtr(LAddIntPtr* ins) {
    724  Register lhs = ToRegister(ins->lhs());
    725  const LAllocation* rhs = ins->rhs();
    726  Register dest = ToRegister(ins->output());
    727 
    728  if (rhs->isConstant()) {
    729    masm.ma_add64(dest, lhs, Operand(ToIntPtr(rhs)));
    730  } else {
    731    masm.ma_add64(dest, lhs, ToRegister(rhs));
    732  }
    733 }
    734 
    735 void CodeGenerator::visitAddI64(LAddI64* lir) {
    736  Register lhs = ToRegister64(lir->lhs()).reg;
    737  LInt64Allocation rhs = lir->rhs();
    738  Register dest = ToOutRegister64(lir).reg;
    739 
    740  if (IsConstant(rhs)) {
    741    masm.ma_add64(dest, lhs, Operand(ToInt64(rhs)));
    742  } else {
    743    masm.ma_add64(dest, lhs, ToRegister64(rhs).reg);
    744  }
    745 }
    746 
// 32-bit integer subtraction. When the instruction carries a snapshot,
// emit the overflow-checking form and bail out to it on overflow.
void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  const LDefinition* dest = ins->output();

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we don't need to check for overflow

  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_sub32(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.ma_sub32(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              ToRegister(rhs), &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}
    776 
    777 void CodeGenerator::visitSubIntPtr(LSubIntPtr* ins) {
    778  Register lhs = ToRegister(ins->lhs());
    779  const LAllocation* rhs = ins->rhs();
    780  Register dest = ToRegister(ins->output());
    781 
    782  if (rhs->isConstant()) {
    783    masm.ma_sub64(dest, lhs, Operand(ToIntPtr(rhs)));
    784  } else {
    785    masm.ma_sub64(dest, lhs, ToRegister(rhs));
    786  }
    787 }
    788 
    789 void CodeGenerator::visitSubI64(LSubI64* lir) {
    790  Register lhs = ToRegister64(lir->lhs()).reg;
    791  LInt64Allocation rhs = lir->rhs();
    792  Register dest = ToOutRegister64(lir).reg;
    793 
    794  if (IsConstant(rhs)) {
    795    masm.ma_sub64(dest, lhs, Operand(ToInt64(rhs)));
    796  } else {
    797    masm.ma_sub64(dest, lhs, ToRegister64(rhs).reg);
    798  }
    799 }
    800 
// 32-bit integer multiply with optional bailouts for overflow and
// negative zero. Constant right-hand sides are strength-reduced to
// negate/move/add/shift sequences where possible.
void CodeGenerator::visitMulI(LMulI* ins) {
 Register lhs = ToRegister(ins->lhs());
 const LAllocation* rhs = ins->rhs();
 Register dest = ToRegister(ins->output());
 MMul* mul = ins->mir();

 // Integer-mode multiplies (e.g. asm.js) are infallible by construction.
 MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
               !mul->canBeNegativeZero() && !mul->canOverflow());

 if (rhs->isConstant()) {
   int32_t constant = ToInt32(rhs);

   // Bailout on -0.0
   // With constant == 0 the product is -0 iff lhs < 0; with a negative
   // constant it is -0 iff lhs == 0.
   if (mul->canBeNegativeZero() && constant <= 0) {
     Assembler::Condition cond =
         (constant == 0) ? Assembler::LessThan : Assembler::Equal;
     bailoutCmp32(cond, lhs, Imm32(0), ins->snapshot());
   }

   switch (constant) {
     case -1:
       // -INT32_MIN overflows int32; bail out before negating.
       if (mul->canOverflow()) {
         bailoutCmp32(Assembler::Equal, lhs, Imm32(INT32_MIN),
                      ins->snapshot());
       }

       masm.negw(dest, lhs);
       return;
     case 0:
       masm.move32(zero, dest);
       return;
     case 1:
       masm.move32(lhs, dest);
       return;
     case 2:
       // x * 2 == x + x, with an overflow check when required.
       if (mul->canOverflow()) {
         Label mulTwoOverflow;
         masm.ma_add32TestOverflow(dest, lhs, lhs, &mulTwoOverflow);

         bailoutFrom(&mulTwoOverflow, ins->snapshot());
       } else {
         masm.addw(dest, lhs, lhs);
       }
       return;
   }

   if (constant > 0) {
     uint32_t shift = mozilla::FloorLog2(constant);

     if (!mul->canOverflow()) {
       // If it cannot overflow, we can do lots of optimizations.

       // See if the constant has one bit set, meaning it can be
       // encoded as a bitshift.
       if ((1 << shift) == constant) {
         masm.slliw(dest, lhs, shift);
         return;
       }

       // If the constant cannot be encoded as (1<<C1), see if it can
       // be encoded as (1<<C1) | (1<<C2), which can be computed
       // using an add and a shift.
       uint32_t rest = constant - (1 << shift);
       uint32_t shift_rest = mozilla::FloorLog2(rest);
       if ((1u << shift_rest) == rest) {
         UseScratchRegisterScope temps(masm);
         Register scratch = temps.Acquire();

         // dest = ((lhs << (C1-C2)) + lhs) << C2
         masm.slliw(scratch, lhs, (shift - shift_rest));
         masm.addw(dest, scratch, lhs);
         if (shift_rest != 0) {
           masm.slliw(dest, dest, shift_rest);
         }
         return;
       }
     } else {
       // To stay on the safe side, only optimize things that are a power of 2.
       if ((1 << shift) == constant) {
         UseScratchRegisterScope temps(&masm);
         Register scratch = temps.Acquire();

         // dest = lhs * pow(2, shift)
         masm.slli(dest, lhs, shift);

         // At runtime, check (dest >> shift == intptr_t(dest) >> shift), if
         // this does not hold, some bits were lost due to overflow, and the
         // computation should be resumed as a double.
         masm.sext_w(scratch, dest);
         bailoutCmp32(Assembler::NotEqual, dest, scratch, ins->snapshot());
         return;
       }
     }
   }

   // General constant case: plain multiply, with or without overflow test.
   if (mul->canOverflow()) {
     Label mulConstOverflow;
     masm.ma_mul32TestOverflow(dest, lhs, Imm32(constant), &mulConstOverflow);

     bailoutFrom(&mulConstOverflow, ins->snapshot());
   } else {
     masm.ma_mul32(dest, lhs, Imm32(constant));
   }
 } else {
   // Register * register.
   if (mul->canOverflow()) {
     Label multRegOverflow;
     masm.ma_mul32TestOverflow(dest, lhs, ToRegister(rhs), &multRegOverflow);

     bailoutFrom(&multRegOverflow, ins->snapshot());
   } else {
     masm.mulw(dest, lhs, ToRegister(rhs));
   }

   if (mul->canBeNegativeZero()) {
     Label done;
     masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);

     // Result is -0 if lhs or rhs is negative.
     // In that case result must be double value so bailout
     UseScratchRegisterScope temps(&masm);
     Register scratch = temps.Acquire();
     masm.or_(scratch, lhs, ToRegister(rhs));
     bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());

     masm.bind(&done);
   }
 }
}
    928 
// Emit a 64-bit multiply of lhs by the compile-time constant rhs into
// dest. Small constants and (2^k +/- 1) / 2^k forms are strength-reduced;
// everything else falls back to a materialized constant and a mul.
// Overflow is not checked: callers use this only for truncating multiplies.
void CodeGeneratorRiscv64::emitMulI64(Register lhs, int64_t rhs,
                                     Register dest) {
 switch (rhs) {
   case -1:
     masm.neg(dest, lhs);
     return;
   case 0:
     masm.movePtr(zero, dest);
     return;
   case 1:
     if (dest != lhs) {
       masm.movePtr(lhs, dest);
     }
     return;
   case 2:
     masm.add(dest, lhs, lhs);
     return;
 }

 // NOTE(review): rhs + 1 below is signed overflow (UB) when
 // rhs == INT64_MAX — presumably unreachable for JS-produced constants,
 // but worth confirming.
 if (rhs > 0) {
   // rhs == 2^shift - 1: dest = (lhs << shift) - lhs.
   if (mozilla::IsPowerOfTwo(static_cast<uint64_t>(rhs + 1))) {
     int32_t shift = mozilla::FloorLog2(rhs + 1);

     // Preserve lhs in a scratch register if the shift would clobber it.
     UseScratchRegisterScope temps(&masm);
     Register savedLhs = lhs;
     if (dest == lhs) {
       savedLhs = temps.Acquire();
       masm.mv(savedLhs, lhs);
     }
     masm.slli(dest, lhs, shift);
     masm.sub(dest, dest, savedLhs);
     return;
   }

   // rhs == 2^shift + 1: dest = (lhs << shift) + lhs.
   if (mozilla::IsPowerOfTwo(static_cast<uint64_t>(rhs - 1))) {
     int32_t shift = mozilla::FloorLog2(rhs - 1);

     UseScratchRegisterScope temps(&masm);
     Register savedLhs = lhs;
     if (dest == lhs) {
       savedLhs = temps.Acquire();
       masm.mv(savedLhs, lhs);
     }
     masm.slli(dest, lhs, shift);
     masm.add(dest, dest, savedLhs);
     return;
   }

   // Use shift if constant is power of 2.
   uint8_t shift = mozilla::FloorLog2(rhs);
   if (int64_t(1) << shift == rhs) {
     masm.slli(dest, lhs, shift);
     return;
   }
 }

 // General case: materialize the constant and multiply.
 UseScratchRegisterScope temps(&masm);
 Register scratch = temps.Acquire();
 masm.ma_li(scratch, Imm64(rhs));
 masm.mul(dest, lhs, scratch);
}
    990 
    991 void CodeGenerator::visitMulIntPtr(LMulIntPtr* ins) {
    992  Register lhs = ToRegister(ins->lhs());
    993  const LAllocation* rhs = ins->rhs();
    994  Register dest = ToRegister(ins->output());
    995 
    996  if (rhs->isConstant()) {
    997    emitMulI64(lhs, ToIntPtr(rhs), dest);
    998  } else {
    999    masm.mul(dest, lhs, ToRegister(rhs));
   1000  }
   1001 }
   1002 
   1003 void CodeGenerator::visitMulI64(LMulI64* lir) {
   1004  Register lhs = ToRegister64(lir->lhs()).reg;
   1005  LInt64Allocation rhs = lir->rhs();
   1006  Register dest = ToOutRegister64(lir).reg;
   1007 
   1008  if (IsConstant(rhs)) {
   1009    emitMulI64(lhs, ToInt64(rhs), dest);
   1010  } else {
   1011    masm.mul(dest, lhs, ToRegister64(rhs).reg);
   1012  }
   1013 }
   1014 
// 32-bit signed division. RISC-V div never traps, so divide-by-zero and
// INT32_MIN / -1 are detected explicitly and either trap (wasm), bail out,
// or produce the truncated result, depending on the MIR flags.
void CodeGenerator::visitDivI(LDivI* ins) {
 Register lhs = ToRegister(ins->lhs());
 Register rhs = ToRegister(ins->rhs());
 Register dest = ToRegister(ins->output());
 MDiv* mir = ins->mir();

 // divw result table:
 // ------------------------------------
 // | Dividend   | Divisor |   Result  |
 // |----------------------------------|
 // |    X       |    0    |    -1     |
 // | INT32_MIN  |   -1    | INT32_MIN |
 // ------------------------------------
 //
 // NOTE: INT32_MIN / -1 returns INT32_MIN, which is the expected (truncated)
 // result. Division by zero returns -1, whereas the truncated result should
 // be 0, so it needs to be handled explicitly.

 Label done;

 // Handle divide by zero.
 if (mir->canBeDivideByZero()) {
   if (mir->trapOnError()) {
     TrapIfDivideByZero(masm, ins, rhs);
   } else if (mir->canTruncateInfinities()) {
     // Truncated division by zero is zero (Infinity|0 == 0)
     Label notzero;
     masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
     masm.move32(Imm32(0), dest);
     masm.ma_branch(&done, ShortJump);
     masm.bind(&notzero);
   } else {
     // Non-truncating division by zero yields Infinity/NaN; bail to doubles.
     MOZ_ASSERT(mir->fallible());
     bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
   }
 }

 // Handle an integer overflow from (INT32_MIN / -1).
 // The integer division gives INT32_MIN, but should be -(double)INT32_MIN.
 if (mir->canBeNegativeOverflow() &&
     (mir->trapOnError() || !mir->canTruncateOverflow())) {
   Label notMinInt;
   masm.ma_b(lhs, Imm32(INT32_MIN), &notMinInt, Assembler::NotEqual,
             ShortJump);

   if (mir->trapOnError()) {
     Label ok;
     masm.ma_b(rhs, Imm32(-1), &ok, Assembler::NotEqual, ShortJump);
     masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->trapSiteDesc());
     masm.bind(&ok);
   } else {
     MOZ_ASSERT(mir->fallible());
     bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
   }
   masm.bind(&notMinInt);
 }

 // Handle negative zero: lhs == 0 && rhs < 0.
 if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
   Label nonzero;
   masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
   bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
   masm.bind(&nonzero);
 }

 // All regular. Lets call div.
 if (mir->canTruncateRemainder()) {
   masm.ma_div32(dest, lhs, rhs);
 } else {
   MOZ_ASSERT(mir->fallible());
   MOZ_ASSERT(lhs != dest && rhs != dest);

   UseScratchRegisterScope temps(masm);
   Register temp = temps.Acquire();

   // The recommended code sequence to obtain both the quotient and remainder
   // is div[u] followed by mod[u].
   masm.ma_div32(dest, lhs, rhs);
   masm.ma_mod32(temp, lhs, rhs);

   // If the remainder is != 0, bailout since this must be a double.
   bailoutCmp32(Assembler::NonZero, temp, temp, ins->snapshot());
 }

 masm.bind(&done);
}
   1101 
// Signed division by a power of two (2^shift), implemented with an
// arithmetic shift plus a rounding adjustment for negative dividends.
void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
 Register lhs = ToRegister(ins->numerator());
 Register dest = ToRegister(ins->output());
 int32_t shift = ins->shift();
 MOZ_ASSERT(0 <= shift && shift <= 31);

 if (shift != 0) {
   UseScratchRegisterScope temps(masm);
   Register tmp = temps.Acquire();

   MDiv* mir = ins->mir();
   if (!mir->isTruncated()) {
     // If the remainder is going to be != 0, bailout since this must
     // be a double. Shifting left by (32 - shift) isolates the low
     // `shift` bits, i.e. the remainder.
     masm.slliw(tmp, lhs, (32 - shift));
     bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
   }

   if (!mir->canBeNegativeDividend()) {
     // Numerator is unsigned, so needs no adjusting. Do the shift.
     masm.sraiw(dest, lhs, shift);
     return;
   }

   // Adjust the value so that shifting produces a correctly rounded result
   // when the numerator is negative. See 10-1 "Signed Division by a Known
   // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
   if (shift > 1) {
     // tmp = lhs < 0 ? (2^shift - 1) : 0, then add it to lhs.
     masm.sraiw(tmp, lhs, 31);
     masm.srliw(tmp, tmp, (32 - shift));
     masm.add32(lhs, tmp);
   } else {
     // shift == 1: the adjustment is just the (unsigned) sign bit.
     masm.srliw(tmp, lhs, (32 - shift));
     masm.add32(lhs, tmp);
   }

   // Do the shift.
   masm.sraiw(dest, tmp, shift);
 } else {
   // Dividing by 1: the quotient is the numerator itself.
   masm.move32(lhs, dest);
 }
}
   1144 
// 32-bit signed modulus. Divide-by-zero and the -0 result case are
// handled explicitly around the hardware remw instruction.
void CodeGenerator::visitModI(LModI* ins) {
 Register lhs = ToRegister(ins->lhs());
 Register rhs = ToRegister(ins->rhs());
 Register dest = ToRegister(ins->output());
 MMod* mir = ins->mir();
 Label done;

 // remw result table:
 // --------------------------------
 // | Dividend  | Divisor | Result |
 // |------------------------------|
 // |    X      |    0    |   X    |
 // | INT32_MIN |   -1    |   0    |
 // --------------------------------
 //
 // NOTE: INT32_MIN % -1 returns 0, which is the expected result.

 // Prevent divide by zero.
 if (mir->canBeDivideByZero()) {
   if (mir->trapOnError()) {
     TrapIfDivideByZero(masm, ins, rhs);
   } else if (mir->isTruncated()) {
     // Truncated division by zero yields integer zero.
     Label yNonZero;
     masm.ma_b(rhs, Imm32(0), &yNonZero, Assembler::NotEqual, ShortJump);
     masm.move32(Imm32(0), dest);
     masm.ma_branch(&done, ShortJump);
     masm.bind(&yNonZero);
   } else {
     // Non-truncated division by zero produces a non-integer.
     MOZ_ASSERT(mir->fallible());
     bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
   }
 }

 masm.ma_mod32(dest, lhs, rhs);

 if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
   MOZ_ASSERT(mir->fallible());
   MOZ_ASSERT(lhs != dest);

   // If dest == 0 and lhs < 0, then the result should be double -0.0.
   // Note that this guard handles lhs == INT_MIN and rhs == -1.

   masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
   bailoutCmp32(Assembler::Signed, lhs, lhs, ins->snapshot());
 }
 masm.bind(&done);
}
   1194 
// Modulus by a power of two (2^shift). Non-negative inputs are a plain
// bitmask; negative inputs are negated around the mask so the result
// keeps the sign of the dividend (C-style remainder).
void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
 Register in = ToRegister(ins->input());
 Register out = ToRegister(ins->output());
 MMod* mir = ins->mir();
 Label negative, done;

 // Switch based on sign of the lhs.
 // Positive numbers are just a bitmask
 masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
 {
   masm.ma_and(out, in, Imm32((1 << ins->shift()) - 1));
   masm.ma_branch(&done, ShortJump);
 }

 // Negative numbers need a negate, bitmask, negate
 {
   masm.bind(&negative);
   masm.negw(out, in);
   masm.ma_and(out, out, Imm32((1 << ins->shift()) - 1));
   masm.negw(out, out);
 }
 if (mir->canBeNegativeDividend()) {
   if (!mir->isTruncated()) {
     // A zero result from a negative dividend is -0.0; bail out so the
     // computation can be redone as a double.
     MOZ_ASSERT(mir->fallible());
     bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
   } else {
     // -0|0 == 0
   }
 }
 masm.bind(&done);
}
   1226 
   1227 void CodeGenerator::visitModMaskI(LModMaskI* ins) {
   1228  Register src = ToRegister(ins->input());
   1229  Register dest = ToRegister(ins->output());
   1230  Register tmp0 = ToRegister(ins->temp0());
   1231  Register tmp1 = ToRegister(ins->temp1());
   1232  MMod* mir = ins->mir();
   1233 
   1234  if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
   1235    MOZ_ASSERT(mir->fallible());
   1236 
   1237    Label bail;
   1238    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
   1239    bailoutFrom(&bail, ins->snapshot());
   1240  } else {
   1241    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
   1242  }
   1243 }
   1244 
   1245 void CodeGenerator::visitBitNotI(LBitNotI* ins) {
   1246  Register input = ToRegister(ins->input());
   1247  Register dest = ToRegister(ins->output());
   1248  masm.not_(dest, input);
   1249 }
   1250 
   1251 void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
   1252  Register input = ToRegister64(ins->input()).reg;
   1253  Register dest = ToOutRegister64(ins).reg;
   1254  masm.not_(dest, input);
   1255 }
   1256 
   1257 void CodeGenerator::visitBitOpI(LBitOpI* ins) {
   1258  Register lhs = ToRegister(ins->lhs());
   1259  const LAllocation* rhs = ins->rhs();
   1260  Register dest = ToRegister(ins->output());
   1261 
   1262  // all of these bitops should be either imm32's, or integer registers.
   1263  switch (ins->bitop()) {
   1264    case JSOp::BitOr:
   1265      if (rhs->isConstant()) {
   1266        masm.ma_or(dest, lhs, Imm32(ToInt32(rhs)));
   1267      } else {
   1268        masm.or_(dest, lhs, ToRegister(rhs));
   1269        masm.SignExtendWord(dest, dest);
   1270      }
   1271      break;
   1272    case JSOp::BitXor:
   1273      if (rhs->isConstant()) {
   1274        masm.ma_xor(dest, lhs, Imm32(ToInt32(rhs)));
   1275      } else {
   1276        masm.xor_(dest, lhs, ToRegister(rhs));
   1277        masm.SignExtendWord(dest, dest);
   1278      }
   1279      break;
   1280    case JSOp::BitAnd:
   1281      if (rhs->isConstant()) {
   1282        masm.ma_and(dest, lhs, Imm32(ToInt32(rhs)));
   1283      } else {
   1284        masm.and_(dest, lhs, ToRegister(rhs));
   1285        masm.SignExtendWord(dest, dest);
   1286      }
   1287      break;
   1288    default:
   1289      MOZ_CRASH("unexpected binary opcode");
   1290  }
   1291 }
   1292 
   1293 void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
   1294  Register lhs = ToRegister64(lir->lhs()).reg;
   1295  LInt64Allocation rhs = lir->rhs();
   1296  Register dest = ToOutRegister64(lir).reg;
   1297 
   1298  switch (lir->bitop()) {
   1299    case JSOp::BitOr:
   1300      if (IsConstant(rhs)) {
   1301        masm.ma_or(dest, lhs, Operand(ToInt64(rhs)));
   1302      } else {
   1303        masm.or_(dest, lhs, ToRegister64(rhs).reg);
   1304      }
   1305      break;
   1306    case JSOp::BitXor:
   1307      if (IsConstant(rhs)) {
   1308        masm.ma_xor(dest, lhs, Operand(ToInt64(rhs)));
   1309      } else {
   1310        masm.xor_(dest, lhs, ToRegister64(rhs).reg);
   1311      }
   1312      break;
   1313    case JSOp::BitAnd:
   1314      if (IsConstant(rhs)) {
   1315        masm.ma_and(dest, lhs, Operand(ToInt64(rhs)));
   1316      } else {
   1317        masm.and_(dest, lhs, ToRegister64(rhs).reg);
   1318      }
   1319      break;
   1320    default:
   1321      MOZ_CRASH("unexpected binary opcode");
   1322  }
   1323 }
   1324 
// 32-bit shifts (<<, >>, >>>). JS masks shift counts to 5 bits. An
// unsigned right shift by 0 can produce a value outside int32 range, so
// fallible >>> bails out when the result would be negative as int32.
void CodeGenerator::visitShiftI(LShiftI* ins) {
 Register lhs = ToRegister(ins->lhs());
 const LAllocation* rhs = ins->rhs();
 Register dest = ToRegister(ins->output());

 if (rhs->isConstant()) {
   int32_t shift = ToInt32(rhs) & 0x1F;
   switch (ins->bitop()) {
     case JSOp::Lsh:
       if (shift) {
         masm.slliw(dest, lhs, shift);
       } else {
         masm.move32(lhs, dest);
       }
       break;
     case JSOp::Rsh:
       if (shift) {
         masm.sraiw(dest, lhs, shift);
       } else {
         masm.move32(lhs, dest);
       }
       break;
     case JSOp::Ursh:
       if (shift) {
         masm.srliw(dest, lhs, shift);
       } else {
         // x >>> 0 can overflow.
         if (ins->mir()->toUrsh()->fallible()) {
           bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
         }
         masm.move32(lhs, dest);
       }
       break;
     default:
       MOZ_CRASH("Unexpected shift op");
   }
 } else {
   // Variable shift count: the *w instructions use the low 5 bits.
   switch (ins->bitop()) {
     case JSOp::Lsh:
       masm.sllw(dest, lhs, ToRegister(rhs));
       break;
     case JSOp::Rsh:
       masm.sraw(dest, lhs, ToRegister(rhs));
       break;
     case JSOp::Ursh:
       masm.srlw(dest, lhs, ToRegister(rhs));
       if (ins->mir()->toUrsh()->fallible()) {
         // x >>> 0 can overflow.
         bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
       }
       break;
     default:
       MOZ_CRASH("Unexpected shift op");
   }
 }
}
   1381 
   1382 void CodeGenerator::visitShiftIntPtr(LShiftIntPtr* ins) {
   1383  Register lhs = ToRegister(ins->lhs());
   1384  const LAllocation* rhs = ins->rhs();
   1385  Register dest = ToRegister(ins->output());
   1386 
   1387  if (rhs->isConstant()) {
   1388    auto shamt = ToIntPtr(rhs) & 0x3F;
   1389    if (shamt) {
   1390      switch (ins->bitop()) {
   1391        case JSOp::Lsh:
   1392          masm.slli(dest, lhs, shamt);
   1393          break;
   1394        case JSOp::Rsh:
   1395          masm.srai(dest, lhs, shamt);
   1396          break;
   1397        case JSOp::Ursh:
   1398          masm.srli(dest, lhs, shamt);
   1399          break;
   1400        default:
   1401          MOZ_CRASH("Unexpected shift op");
   1402      }
   1403    } else if (lhs != dest) {
   1404      masm.movePtr(lhs, dest);
   1405    }
   1406  } else {
   1407    Register shift = ToRegister(rhs);
   1408    switch (ins->bitop()) {
   1409      case JSOp::Lsh:
   1410        masm.sll(dest, lhs, shift);
   1411        break;
   1412      case JSOp::Rsh:
   1413        masm.sra(dest, lhs, shift);
   1414        break;
   1415      case JSOp::Ursh:
   1416        masm.srl(dest, lhs, shift);
   1417        break;
   1418      default:
   1419        MOZ_CRASH("Unexpected shift op");
   1420    }
   1421  }
   1422 }
   1423 
   1424 void CodeGenerator::visitShiftI64(LShiftI64* lir) {
   1425  Register lhs = ToRegister64(lir->lhs()).reg;
   1426  const LAllocation* rhs = lir->rhs();
   1427  Register dest = ToOutRegister64(lir).reg;
   1428 
   1429  if (rhs->isConstant()) {
   1430    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
   1431    if (shift) {
   1432      switch (lir->bitop()) {
   1433        case JSOp::Lsh:
   1434          masm.slli(dest, lhs, shift);
   1435          break;
   1436        case JSOp::Rsh:
   1437          masm.srai(dest, lhs, shift);
   1438          break;
   1439        case JSOp::Ursh:
   1440          masm.srli(dest, lhs, shift);
   1441          break;
   1442        default:
   1443          MOZ_CRASH("Unexpected shift op");
   1444      }
   1445    } else if (lhs != dest) {
   1446      masm.movePtr(lhs, dest);
   1447    }
   1448    return;
   1449  }
   1450 
   1451  Register shift = ToRegister(rhs);
   1452  switch (lir->bitop()) {
   1453    case JSOp::Lsh:
   1454      masm.sll(dest, lhs, shift);
   1455      break;
   1456    case JSOp::Rsh:
   1457      masm.sra(dest, lhs, shift);
   1458      break;
   1459    case JSOp::Ursh:
   1460      masm.srl(dest, lhs, shift);
   1461      break;
   1462    default:
   1463      MOZ_CRASH("Unexpected shift op");
   1464  }
   1465 }
   1466 
   1467 void CodeGenerator::visitUrshD(LUrshD* ins) {
   1468  Register lhs = ToRegister(ins->lhs());
   1469  Register temp = ToRegister(ins->temp0());
   1470 
   1471  const LAllocation* rhs = ins->rhs();
   1472  FloatRegister out = ToFloatRegister(ins->output());
   1473 
   1474  if (rhs->isConstant()) {
   1475    masm.srliw(temp, lhs, ToInt32(rhs) & 0x1f);
   1476  } else {
   1477    masm.srlw(temp, lhs, ToRegister(rhs));
   1478  }
   1479 
   1480  masm.convertUInt32ToDouble(temp, out);
   1481 }
   1482 
// Math.pow(x, 0.5), which is NOT plain sqrt for two special inputs:
// pow(-Infinity, 0.5) is +Infinity, and pow(-0, 0.5) is +0.
void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
 FloatRegister input = ToFloatRegister(ins->input());
 FloatRegister output = ToFloatRegister(ins->output());
 ScratchDoubleScope fpscratch(masm);

 Label done, skip;

 // Masm.pow(-Infinity, 0.5) == Infinity.
 masm.loadConstantDouble(NegativeInfinity<double>(), fpscratch);
 masm.BranchFloat64(Assembler::DoubleNotEqualOrUnordered, input, fpscratch,
                    &skip, ShortJump);
 {
   // input == -Infinity: negate the scratch (-Inf) to produce +Infinity.
   masm.fneg_d(output, fpscratch);
   masm.ma_branch(&done, ShortJump);
 }
 masm.bind(&skip);

 // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
 // Adding 0 converts any -0 to 0.
 masm.loadConstantDouble(0.0, fpscratch);
 masm.fadd_d(output, input, fpscratch);
 masm.fsqrt_d(output, output);

 masm.bind(&done);
}
   1508 
   1509 void CodeGenerator::visitMathD(LMathD* math) {
   1510  FloatRegister src1 = ToFloatRegister(math->lhs());
   1511  FloatRegister src2 = ToFloatRegister(math->rhs());
   1512  FloatRegister output = ToFloatRegister(math->output());
   1513 
   1514  switch (math->jsop()) {
   1515    case JSOp::Add:
   1516      masm.fadd_d(output, src1, src2);
   1517      break;
   1518    case JSOp::Sub:
   1519      masm.fsub_d(output, src1, src2);
   1520      break;
   1521    case JSOp::Mul:
   1522      masm.fmul_d(output, src1, src2);
   1523      break;
   1524    case JSOp::Div:
   1525      masm.fdiv_d(output, src1, src2);
   1526      break;
   1527    default:
   1528      MOZ_CRASH("unexpected opcode");
   1529  }
   1530 }
   1531 
   1532 void CodeGenerator::visitMathF(LMathF* math) {
   1533  FloatRegister src1 = ToFloatRegister(math->lhs());
   1534  FloatRegister src2 = ToFloatRegister(math->rhs());
   1535  FloatRegister output = ToFloatRegister(math->output());
   1536 
   1537  switch (math->jsop()) {
   1538    case JSOp::Add:
   1539      masm.fadd_s(output, src1, src2);
   1540      break;
   1541    case JSOp::Sub:
   1542      masm.fsub_s(output, src1, src2);
   1543      break;
   1544    case JSOp::Mul:
   1545      masm.fmul_s(output, src1, src2);
   1546      break;
   1547    case JSOp::Div:
   1548      masm.fdiv_s(output, src1, src2);
   1549      break;
   1550    default:
   1551      MOZ_CRASH("unexpected opcode");
   1552  }
   1553 }
   1554 
   1555 void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
   1556  emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
   1557                     ins->mir());
   1558 }
   1559 
   1560 void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
   1561  masm.truncateFloat32ModUint32(ToFloatRegister(ins->input()),
   1562                                ToRegister(ins->output()));
   1563 }
   1564 
   1565 void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
   1566    LWasmBuiltinTruncateDToInt32* lir) {
   1567  emitTruncateDouble(ToFloatRegister(lir->input()), ToRegister(lir->output()),
   1568                     lir->mir());
   1569 }
   1570 
   1571 void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
   1572    LWasmBuiltinTruncateFToInt32* lir) {
   1573  MOZ_ASSERT(lir->instance()->isBogus(), "instance not used for riscv64");
   1574  masm.truncateFloat32ModUint32(ToFloatRegister(lir->input()),
   1575                                ToRegister(lir->output()));
   1576 }
   1577 
// Wasm float->int32 truncation. Saturating conversions are fully handled
// inline; non-saturating ones get an out-of-line path that raises the
// appropriate trap on invalid inputs.
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
 auto input = ToFloatRegister(lir->input());
 auto output = ToRegister(lir->output());

 MWasmTruncateToInt32* mir = lir->mir();
 MIRType fromType = mir->input()->type();

 MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

 bool isSaturating = mir->isSaturating();

 // RISCV saturating instructions don't require an OOL path.
 OutOfLineWasmTruncateCheck* ool = nullptr;
 Label* oolEntry = nullptr;
 if (!isSaturating) {
   ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
   addOutOfLineCode(ool, mir);

   oolEntry = ool->entry();
 }

 // Select the conversion by source type and signedness.
 if (fromType == MIRType::Double) {
   if (mir->isUnsigned()) {
     masm.wasmTruncateDoubleToUInt32(input, output, isSaturating, oolEntry);
   } else {
     masm.wasmTruncateDoubleToInt32(input, output, isSaturating, oolEntry);
   }
 } else {
   if (mir->isUnsigned()) {
     masm.wasmTruncateFloat32ToUInt32(input, output, isSaturating, oolEntry);
   } else {
     masm.wasmTruncateFloat32ToInt32(input, output, isSaturating, oolEntry);
   }
 }

 // RISCV can handle all success case. The OOL path is only used to execute
 // the correct trap.
 MOZ_ASSERT(!ool || !ool->rejoin()->bound(), "ool path doesn't return");
}
   1617 
   1618 void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
   1619  FloatRegister input = ToFloatRegister(test->input());
   1620  ScratchDoubleScope fpscratch(masm);
   1621 
   1622  MBasicBlock* ifTrue = test->ifTrue();
   1623  MBasicBlock* ifFalse = test->ifFalse();
   1624 
   1625  masm.loadConstantDouble(0.0, fpscratch);
   1626  // If 0, or NaN, the result is false.
   1627  if (isNextBlock(ifFalse->lir())) {
   1628    branchToBlock(DoubleFloat, input, fpscratch, ifTrue,
   1629                  Assembler::DoubleNotEqual);
   1630  } else {
   1631    branchToBlock(DoubleFloat, input, fpscratch, ifFalse,
   1632                  Assembler::DoubleEqualOrUnordered);
   1633    jumpToBlock(ifTrue);
   1634  }
   1635 }
   1636 
   1637 void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
   1638  FloatRegister input = ToFloatRegister(test->input());
   1639  ScratchFloat32Scope fpscratch(masm);
   1640 
   1641  MBasicBlock* ifTrue = test->ifTrue();
   1642  MBasicBlock* ifFalse = test->ifFalse();
   1643 
   1644  masm.loadConstantFloat32(0.0f, fpscratch);
   1645  // If 0, or NaN, the result is false.
   1646 
   1647  if (isNextBlock(ifFalse->lir())) {
   1648    branchToBlock(SingleFloat, input, fpscratch, ifTrue,
   1649                  Assembler::DoubleNotEqual);
   1650  } else {
   1651    branchToBlock(SingleFloat, input, fpscratch, ifFalse,
   1652                  Assembler::DoubleEqualOrUnordered);
   1653    jumpToBlock(ifTrue);
   1654  }
   1655 }
   1656 
   1657 void CodeGenerator::visitCompareD(LCompareD* comp) {
   1658  FloatRegister lhs = ToFloatRegister(comp->left());
   1659  FloatRegister rhs = ToFloatRegister(comp->right());
   1660  Register dest = ToRegister(comp->output());
   1661 
   1662  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
   1663  masm.ma_compareF64(dest, cond, lhs, rhs);
   1664 }
   1665 
   1666 void CodeGenerator::visitCompareF(LCompareF* comp) {
   1667  FloatRegister lhs = ToFloatRegister(comp->left());
   1668  FloatRegister rhs = ToFloatRegister(comp->right());
   1669  Register dest = ToRegister(comp->output());
   1670 
   1671  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
   1672  masm.ma_compareF32(dest, cond, lhs, rhs);
   1673 }
   1674 
   1675 void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
   1676  FloatRegister lhs = ToFloatRegister(comp->left());
   1677  FloatRegister rhs = ToFloatRegister(comp->right());
   1678 
   1679  Assembler::DoubleCondition cond =
   1680      JSOpToDoubleCondition(comp->cmpMir()->jsop());
   1681  MBasicBlock* ifTrue = comp->ifTrue();
   1682  MBasicBlock* ifFalse = comp->ifFalse();
   1683 
   1684  if (isNextBlock(ifFalse->lir())) {
   1685    branchToBlock(DoubleFloat, lhs, rhs, ifTrue, cond);
   1686  } else {
   1687    branchToBlock(DoubleFloat, lhs, rhs, ifFalse,
   1688                  Assembler::InvertCondition(cond));
   1689    jumpToBlock(ifTrue);
   1690  }
   1691 }
   1692 
void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
  // Fused float32 compare + conditional branch (single-precision variant of
  // visitCompareDAndBranch).
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  // Branch directly when the false block falls through; otherwise invert
  // the condition and emit an extra jump to the true block.
  if (isNextBlock(ifFalse->lir())) {
    branchToBlock(SingleFloat, lhs, rhs, ifTrue, cond);
  } else {
    branchToBlock(SingleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}
   1710 
   1711 void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
   1712  masm.convertUInt32ToDouble(ToRegister(lir->input()),
   1713                             ToFloatRegister(lir->output()));
   1714 }
   1715 
   1716 void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
   1717  masm.convertUInt32ToFloat32(ToRegister(lir->input()),
   1718                              ToFloatRegister(lir->output()));
   1719 }
   1720 
   1721 void CodeGenerator::visitNotD(LNotD* ins) {
   1722  // Since this operation is not, we want to set a bit if
   1723  // the double is falsey, which means 0.0, -0.0 or NaN.
   1724  FloatRegister in = ToFloatRegister(ins->input());
   1725  Register dest = ToRegister(ins->output());
   1726  ScratchDoubleScope fpscratch(masm);
   1727 
   1728  masm.loadConstantDouble(0.0, fpscratch);
   1729  masm.ma_compareF64(dest, Assembler::DoubleEqualOrUnordered, in, fpscratch);
   1730 }
   1731 
   1732 void CodeGenerator::visitNotF(LNotF* ins) {
   1733  // Since this operation is not, we want to set a bit if
   1734  // the float32 is falsey, which means 0.0, -0.0 or NaN.
   1735  FloatRegister in = ToFloatRegister(ins->input());
   1736  Register dest = ToRegister(ins->output());
   1737  ScratchFloat32Scope fpscratch(masm);
   1738 
   1739  masm.loadConstantFloat32(0.0f, fpscratch);
   1740  masm.ma_compareF32(dest, Assembler::DoubleEqualOrUnordered, in, fpscratch);
   1741 }
   1742 
   1743 void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
   1744 
   1745 void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
   1746 
void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  // asm.js heap load. Unlike wasm, an out-of-bounds asm.js read does not
  // trap: it produces NaN for float accesses and 0 for integer accesses
  // (see the outOfRange path below).
  const MAsmJSLoadHeap* mir = ins->mir();
  const LAllocation* ptr = ins->ptr();
  const LDefinition* out = ins->output();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  Scalar::Type accessType = mir->access().type();
  bool isSigned = Scalar::isSignedIntType(accessType);
  int size = Scalar::byteSize(accessType) * 8;  // access width in bits
  bool isFloat = Scalar::isFloatingType(accessType);

  // Case 1: constant pointer. The bounds check must have been elided
  // statically, so load directly at HeapReg + immediate.
  if (ptr->isConstant()) {
    MOZ_ASSERT(!mir->needsBoundsCheck());
    int32_t ptrImm = ptr->toConstant()->toInt32();
    MOZ_ASSERT(ptrImm >= 0);
    if (isFloat) {
      if (size == 32) {
        masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
      } else {
        masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
      }
    } else {
      masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
                   static_cast<LoadStoreSize>(size),
                   isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Register ptrReg = ToRegister(ptr);

  // Case 2: dynamic pointer, bounds check proven unnecessary.
  if (!mir->needsBoundsCheck()) {
    if (isFloat) {
      if (size == 32) {
        masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
                         ToFloatRegister(out));
      } else {
        masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
                        ToFloatRegister(out));
      }
    } else {
      masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                   static_cast<LoadStoreSize>(size),
                   isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  // Case 3: dynamic pointer with an explicit bounds check.
  Label done, outOfRange;
  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
                         ToRegister(boundsCheckLimit), &outOfRange);
  // Offset is ok, let's load value.
  if (isFloat) {
    if (size == 32) {
      masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
                       ToFloatRegister(out));
    } else {
      masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
                      ToFloatRegister(out));
    }
  } else {
    masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                 static_cast<LoadStoreSize>(size),
                 isSigned ? SignExtend : ZeroExtend);
  }
  masm.ma_branch(&done, ShortJump);
  masm.bind(&outOfRange);
  // Offset is out of range. Load default values.
  if (isFloat) {
    if (size == 32) {
      masm.loadConstantFloat32(float(GenericNaN()), ToFloatRegister(out));
    } else {
      masm.loadConstantDouble(GenericNaN(), ToFloatRegister(out));
    }
  } else {
    masm.move32(Imm32(0), ToRegister(out));
  }
  masm.bind(&done);
}
   1826 
   1827 void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
   1828  const MAsmJSStoreHeap* mir = ins->mir();
   1829  const LAllocation* value = ins->value();
   1830  const LAllocation* ptr = ins->ptr();
   1831  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
   1832 
   1833  Scalar::Type accessType = mir->access().type();
   1834  bool isSigned = Scalar::isSignedIntType(accessType);
   1835  int size = Scalar::byteSize(accessType) * 8;
   1836  bool isFloat = Scalar::isFloatingType(accessType);
   1837 
   1838  if (ptr->isConstant()) {
   1839    MOZ_ASSERT(!mir->needsBoundsCheck());
   1840    int32_t ptrImm = ptr->toConstant()->toInt32();
   1841    MOZ_ASSERT(ptrImm >= 0);
   1842 
   1843    if (isFloat) {
   1844      FloatRegister freg = ToFloatRegister(value);
   1845      Address addr(HeapReg, ptrImm);
   1846      if (size == 32) {
   1847        masm.storeFloat32(freg, addr);
   1848      } else {
   1849        masm.storeDouble(freg, addr);
   1850      }
   1851    } else {
   1852      masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
   1853                    static_cast<LoadStoreSize>(size),
   1854                    isSigned ? SignExtend : ZeroExtend);
   1855    }
   1856    return;
   1857  }
   1858 
   1859  Register ptrReg = ToRegister(ptr);
   1860  Address dstAddr(ptrReg, 0);
   1861 
   1862  if (!mir->needsBoundsCheck()) {
   1863    if (isFloat) {
   1864      FloatRegister freg = ToFloatRegister(value);
   1865      BaseIndex bi(HeapReg, ptrReg, TimesOne);
   1866      if (size == 32) {
   1867        masm.storeFloat32(freg, bi);
   1868      } else {
   1869        masm.storeDouble(freg, bi);
   1870      }
   1871    } else {
   1872      masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
   1873                    static_cast<LoadStoreSize>(size),
   1874                    isSigned ? SignExtend : ZeroExtend);
   1875    }
   1876    return;
   1877  }
   1878 
   1879  Label outOfRange;
   1880  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
   1881                         ToRegister(boundsCheckLimit), &outOfRange);
   1882 
   1883  // Offset is ok, let's store value.
   1884  if (isFloat) {
   1885    if (size == 32) {
   1886      masm.storeFloat32(ToFloatRegister(value),
   1887                        BaseIndex(HeapReg, ptrReg, TimesOne));
   1888    } else
   1889      masm.storeDouble(ToFloatRegister(value),
   1890                       BaseIndex(HeapReg, ptrReg, TimesOne));
   1891  } else {
   1892    masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
   1893                  static_cast<LoadStoreSize>(size),
   1894                  isSigned ? SignExtend : ZeroExtend);
   1895  }
   1896 
   1897  masm.bind(&outOfRange);
   1898 }
   1899 
void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  // Wasm atomic compare-exchange on linear memory; the previous value is
  // produced in the output register.
  MWasmCompareExchangeHeap* mir = ins->mir();
  Register memoryBase = ToRegister(ins->memoryBase());
  Register ptrReg = ToRegister(ins->ptr());
  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());

  Register oldval = ToRegister(ins->oldValue());
  Register newval = ToRegister(ins->newValue());
  // Temps used for sub-word accesses; invalid when not needed.
  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());

  masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
                           offsetTemp, maskTemp, ToRegister(ins->output()));
}
   1916 
void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
  // Wasm atomic exchange on linear memory; the previous value is produced
  // in the output register.
  MWasmAtomicExchangeHeap* mir = ins->mir();
  Register memoryBase = ToRegister(ins->memoryBase());
  Register ptrReg = ToRegister(ins->ptr());
  Register value = ToRegister(ins->value());
  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());

  // Temps used for sub-word accesses; invalid when not needed.
  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());

  masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
                          maskTemp, ToRegister(ins->output()));
}
   1931 
void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  // Wasm atomic read-modify-write whose old value is used (contrast with
  // visitWasmAtomicBinopHeapForEffect below).
  MOZ_ASSERT(ins->mir()->hasUses());

  MWasmAtomicBinopHeap* mir = ins->mir();
  Register memoryBase = ToRegister(ins->memoryBase());
  Register ptrReg = ToRegister(ins->ptr());
  // Temps used for sub-word accesses; invalid when not needed.
  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());

  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());

  masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
                         ToRegister(ins->value()), srcAddr, valueTemp,
                         offsetTemp, maskTemp, ToRegister(ins->output()));
}
   1948 
void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  // Wasm atomic read-modify-write whose old value is unused, so no output
  // register is produced.
  MOZ_ASSERT(!ins->mir()->hasUses());

  MWasmAtomicBinopHeap* mir = ins->mir();
  Register memoryBase = ToRegister(ins->memoryBase());
  Register ptrReg = ToRegister(ins->ptr());
  // Temps used for sub-word accesses; invalid when not needed.
  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());

  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());
  masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
                          ToRegister(ins->value()), srcAddr, valueTemp,
                          offsetTemp, maskTemp);
}
   1965 
void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
  // Spill an outgoing wasm call argument to its stack slot at
  // StackPointer + spOffset. The argument may be an immediate, a GPR, or a
  // float register (double vs. float32 chosen by the MIR type).
  const MWasmStackArg* mir = ins->mir();
  if (ins->arg()->isConstant()) {
    masm.storePtr(ImmWord(ToInt32(ins->arg())),
                  Address(StackPointer, mir->spOffset()));
  } else {
    if (ins->arg()->isGeneralReg()) {
      masm.storePtr(ToRegister(ins->arg()),
                    Address(StackPointer, mir->spOffset()));
    } else if (mir->input()->type() == MIRType::Double) {
      masm.storeDouble(ToFloatRegister(ins->arg()),
                       Address(StackPointer, mir->spOffset()));
    } else {
      masm.storeFloat32(ToFloatRegister(ins->arg()),
                        Address(StackPointer, mir->spOffset()));
    }
  }
}
   1984 
   1985 void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
   1986  const MWasmStackArg* mir = ins->mir();
   1987  Address dst(StackPointer, mir->spOffset());
   1988  if (IsConstant(ins->arg())) {
   1989    masm.store64(Imm64(ToInt64(ins->arg())), dst);
   1990  } else {
   1991    masm.store64(ToRegister64(ins->arg()), dst);
   1992  }
   1993 }
   1994 
void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
  // wasm select: output = cond ? trueExpr : falseExpr. The register
  // allocator reuses the true input as the output, so only the cond==0
  // case needs a move/load.
  MIRType mirType = ins->mir()->type();

  Register cond = ToRegister(ins->condExpr());
  const LAllocation* falseExpr = ins->falseExpr();

  // Integer-like selects (Int32 and anyref) use GPR conditional moves.
  if (mirType == MIRType::Int32 || mirType == MIRType::WasmAnyRef) {
    Register out = ToRegister(ins->output());
    MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
               "true expr input is reused for output");
    if (falseExpr->isGeneralReg()) {
      masm.moveIfZero(out, ToRegister(falseExpr), cond);
    } else {
      // False input lives in memory: conditionally load it over the output.
      masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
    }
    return;
  }

  // Floating-point selects.
  FloatRegister out = ToFloatRegister(ins->output());
  MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
             "true expr input is reused for output");

  if (falseExpr->isFloatReg()) {
    if (mirType == MIRType::Float32) {
      masm.ma_fmovz(SingleFloat, out, ToFloatRegister(falseExpr), cond);
    } else if (mirType == MIRType::Double) {
      masm.ma_fmovz(DoubleFloat, out, ToFloatRegister(falseExpr), cond);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }
  } else {
    // False input lives in memory: skip the load when cond is non-zero
    // (output already holds the true value).
    Label done;
    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);

    if (mirType == MIRType::Float32) {
      masm.loadFloat32(ToAddress(falseExpr), out);
    } else if (mirType == MIRType::Double) {
      masm.loadDouble(ToAddress(falseExpr), out);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }

    masm.bind(&done);
  }
}
   2040 
// We expect to handle only the case where compare is {U,}Int32 and select is
// {U,}Int32, and the "true" input is reused for the output.
void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
  bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
                    ins->compareType() == MCompare::Compare_UInt32;
  bool selIs32bit = ins->mir()->type() == MIRType::Int32;

  MOZ_RELEASE_ASSERT(
      cmpIs32bit && selIs32bit,
      "CodeGenerator::visitWasmCompareAndSelect: unexpected types");

  Register trueExprAndDest = ToRegister(ins->output());
  MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
             "true expr input is reused for output");

  // The output already holds the true value, so invert the condition and
  // conditionally overwrite it with the false value.
  Assembler::Condition cond = Assembler::InvertCondition(
      JSOpToCondition(ins->compareType(), ins->jsop()));
  const LAllocation* rhs = ins->rightExpr();
  const LAllocation* falseExpr = ins->ifFalseExpr();
  Register lhs = ToRegister(ins->leftExpr());

  masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
                   trueExprAndDest);
}
   2065 
void CodeGenerator::visitUDiv(LUDiv* ins) {
  // Unsigned 32-bit division. Depending on MIR flags this either traps,
  // truncates (x/0 == 0), or bails out to Baseline on divide-by-zero, on a
  // non-zero remainder, and on results that don't fit in a signed int32.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Label done;

  MDiv* mir = ins->mir();

  // Prevent divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      TrapIfDivideByZero(masm, ins, rhs);
    } else if (mir->isTruncated()) {
      // Infinity|0 == 0
      Label nonZero;
      masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero, ShortJump);
      masm.move32(Imm32(0), output);
      masm.ma_branch(&done, ShortJump);
      masm.bind(&nonZero);
    } else {
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  // If the remainder is > 0, bailout since this must be a double.
  if (mir->canTruncateRemainder()) {
    masm.ma_divu32(output, lhs, rhs);
  } else {
    MOZ_ASSERT(lhs != output && rhs != output);

    UseScratchRegisterScope temps(&masm);
    Register scratch = temps.Acquire();

    // The recommended code sequence to obtain both the quotient and remainder
    // is div[u] followed by mod[u].
    masm.ma_divu32(output, lhs, rhs);
    masm.ma_modu32(scratch, lhs, rhs);

    bailoutCmp32(Assembler::NonZero, scratch, scratch, ins->snapshot());
  }

  // Unsigned div can return a value that's not a signed int32.
  // If our users aren't expecting that, bail.
  if (!mir->isTruncated()) {
    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
  }

  masm.bind(&done);
}
   2115 
void CodeGenerator::visitUMod(LUMod* ins) {
  // Unsigned 32-bit modulus. Mirrors visitUDiv's handling of the
  // divide-by-zero and signed-overflow-of-the-result cases.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Label done;

  MMod* mir = ins->mir();

  // Prevent divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      TrapIfDivideByZero(masm, ins, rhs);
    } else if (mir->isTruncated()) {
      // NaN|0 == 0
      Label nonZero;
      masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero, ShortJump);
      masm.move32(Imm32(0), output);
      masm.ma_branch(&done, ShortJump);
      masm.bind(&nonZero);
    } else {
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  masm.ma_modu32(output, lhs, rhs);

  // Bail if the output would be negative.
  //
  // LUMod inputs may be Uint32, so care is taken to ensure the result is not
  // unexpectedly signed.
  if (!mir->isTruncated()) {
    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
  }

  masm.bind(&done);
}
   2152 
   2153 void CodeGenerator::visitEffectiveAddress3(LEffectiveAddress3* ins) {
   2154  const MEffectiveAddress3* mir = ins->mir();
   2155  Register base = ToRegister(ins->base());
   2156  Register index = ToRegister(ins->index());
   2157  Register output = ToRegister(ins->output());
   2158 
   2159  BaseIndex address(base, index, mir->scale(), mir->displacement());
   2160  masm.computeEffectiveAddress32(address, output);
   2161 }
   2162 
   2163 void CodeGenerator::visitEffectiveAddress2(LEffectiveAddress2* ins) {
   2164  const MEffectiveAddress2* mir = ins->mir();
   2165  Register index = ToRegister(ins->index());
   2166  Register output = ToRegister(ins->output());
   2167 
   2168  BaseIndex address(zero, index, mir->scale(), mir->displacement());
   2169  masm.computeEffectiveAddress32(address, output);
   2170 }
   2171 
   2172 void CodeGenerator::visitNegI(LNegI* ins) {
   2173  Register input = ToRegister(ins->input());
   2174  Register output = ToRegister(ins->output());
   2175 
   2176  masm.negw(output, input);
   2177 }
   2178 
   2179 void CodeGenerator::visitNegI64(LNegI64* ins) {
   2180  Register input = ToRegister64(ins->input()).reg;
   2181  Register output = ToOutRegister64(ins).reg;
   2182 
   2183  masm.neg(output, input);
   2184 }
   2185 
   2186 void CodeGenerator::visitNegD(LNegD* ins) {
   2187  FloatRegister input = ToFloatRegister(ins->input());
   2188  FloatRegister output = ToFloatRegister(ins->output());
   2189 
   2190  masm.fneg_d(output, input);
   2191 }
   2192 
   2193 void CodeGenerator::visitNegF(LNegF* ins) {
   2194  FloatRegister input = ToFloatRegister(ins->input());
   2195  FloatRegister output = ToFloatRegister(ins->output());
   2196 
   2197  masm.fneg_s(output, input);
   2198 }
   2199 
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
  // out = base + constant offset (32-bit); trap with OutOfBounds if the
  // addition carries.
  MWasmAddOffset* mir = lir->mir();
  Register base = ToRegister(lir->base());
  Register out = ToRegister(lir->output());

  Label ok;
  masm.ma_add32TestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
                         &ok);
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  masm.bind(&ok);
}
   2211 
void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
  // 64-bit variant of visitWasmAddOffset: trap with OutOfBounds on carry.
  MWasmAddOffset* mir = lir->mir();
  Register64 base = ToRegister64(lir->base());
  Register64 out = ToOutRegister64(lir);

  Label ok;
  masm.ma_addPtrTestCarry(Assembler::CarryClear, out.reg, base.reg,
                          ImmWord(mir->offset()), &ok);
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  masm.bind(&ok);
}
   2223 
void CodeGenerator::visitAtomicTypedArrayElementBinop(
    LAtomicTypedArrayElementBinop* lir) {
  // Atomic read-modify-write on a typed-array element whose old value is
  // used (the for-effect variant is handled below).
  MOZ_ASSERT(!lir->mir()->isForEffect());

  AnyRegister output = ToAnyRegister(lir->output());
  Register elements = ToRegister(lir->elements());
  // Temps for sub-word accesses and result widening; invalid when unused.
  Register outTemp = ToTempRegisterOrInvalid(lir->temp0());
  Register valueTemp = ToTempRegisterOrInvalid(lir->temp1());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp2());
  Register maskTemp = ToTempRegisterOrInvalid(lir->temp3());
  Register value = ToRegister(lir->value());
  Scalar::Type arrayType = lir->mir()->arrayType();

  // The index may be constant (Address) or a register (BaseIndex).
  auto mem = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  mem.match([&](const auto& mem) {
    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
                         lir->mir()->operation(), value, mem, valueTemp,
                         offsetTemp, maskTemp, outTemp, output);
  });
}
   2245 
void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
    LAtomicTypedArrayElementBinopForEffect* lir) {
  // Atomic read-modify-write on a typed-array element whose old value is
  // discarded, so no output register is produced.
  MOZ_ASSERT(lir->mir()->isForEffect());

  Register elements = ToRegister(lir->elements());
  // Temps for sub-word accesses; invalid when unused.
  Register valueTemp = ToTempRegisterOrInvalid(lir->temp0());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp1());
  Register maskTemp = ToTempRegisterOrInvalid(lir->temp2());
  Register value = ToRegister(lir->value());
  Scalar::Type arrayType = lir->mir()->arrayType();

  // The index may be constant (Address) or a register (BaseIndex).
  auto mem = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  mem.match([&](const auto& mem) {
    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
                          lir->mir()->operation(), value, mem, valueTemp,
                          offsetTemp, maskTemp);
  });
}
   2265 
void CodeGenerator::visitCompareExchangeTypedArrayElement(
    LCompareExchangeTypedArrayElement* lir) {
  // Atomic compare-exchange on a typed-array element; the previous value
  // is produced in the output register.
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp0());

  Register oldval = ToRegister(lir->oldval());
  Register newval = ToRegister(lir->newval());
  // Temps for sub-word accesses; invalid when unused.
  Register valueTemp = ToTempRegisterOrInvalid(lir->temp1());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp2());
  Register maskTemp = ToTempRegisterOrInvalid(lir->temp3());
  Scalar::Type arrayType = lir->mir()->arrayType();

  // The index may be constant (Address) or a register (BaseIndex).
  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  dest.match([&](const auto& dest) {
    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
                           output);
  });
}
   2287 
void CodeGenerator::visitAtomicExchangeTypedArrayElement(
    LAtomicExchangeTypedArrayElement* lir) {
  // Atomic exchange on a typed-array element; the previous value is
  // produced in the output register.
  Register elements = ToRegister(lir->elements());
  AnyRegister output = ToAnyRegister(lir->output());
  Register outTemp = ToTempRegisterOrInvalid(lir->temp0());

  Register value = ToRegister(lir->value());
  // Temps for sub-word accesses; invalid when unused.
  Register valueTemp = ToTempRegisterOrInvalid(lir->temp1());
  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp2());
  Register maskTemp = ToTempRegisterOrInvalid(lir->temp3());
  Scalar::Type arrayType = lir->mir()->arrayType();

  // The index may be constant (Address) or a register (BaseIndex).
  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  dest.match([&](const auto& dest) {
    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
                          valueTemp, offsetTemp, maskTemp, outTemp, output);
  });
}
   2307 
void CodeGenerator::visitCompareExchangeTypedArrayElement64(
    LCompareExchangeTypedArrayElement64* lir) {
  // 64-bit (BigInt-element) atomic compare-exchange on a typed array.
  Register elements = ToRegister(lir->elements());
  Register64 oldval = ToRegister64(lir->oldval());
  Register64 newval = ToRegister64(lir->newval());
  Register64 out = ToOutRegister64(lir);
  Scalar::Type arrayType = lir->mir()->arrayType();

  // The index may be constant (Address) or a register (BaseIndex).
  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  dest.match([&](const auto& dest) {
    masm.compareExchange64(Synchronization::Full(), dest, oldval, newval, out);
  });
}
   2322 
void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
    LAtomicExchangeTypedArrayElement64* lir) {
  // 64-bit (BigInt-element) atomic exchange on a typed array.
  Register elements = ToRegister(lir->elements());
  Register64 value = ToRegister64(lir->value());
  Register64 out = ToOutRegister64(lir);
  Scalar::Type arrayType = lir->mir()->arrayType();

  // The index may be constant (Address) or a register (BaseIndex).
  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  dest.match([&](const auto& dest) {
    masm.atomicExchange64(Synchronization::Full(), dest, value, out);
  });
}
   2336 
void CodeGenerator::visitAtomicTypedArrayElementBinop64(
    LAtomicTypedArrayElementBinop64* lir) {
  // 64-bit atomic read-modify-write on a typed array whose old value is
  // used.
  MOZ_ASSERT(lir->mir()->hasUses());

  Register elements = ToRegister(lir->elements());
  Register64 value = ToRegister64(lir->value());
  Register64 temp = ToRegister64(lir->temp0());
  Register64 out = ToOutRegister64(lir);

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  // The index may be constant (Address) or a register (BaseIndex).
  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  dest.match([&](const auto& dest) {
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, value, dest, temp,
                         out);
  });
}
   2356 
void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
    LAtomicTypedArrayElementBinopForEffect64* lir) {
  // 64-bit atomic read-modify-write on a typed array whose old value is
  // discarded.
  MOZ_ASSERT(!lir->mir()->hasUses());

  Register elements = ToRegister(lir->elements());
  Register64 value = ToRegister64(lir->value());
  Register64 temp = ToRegister64(lir->temp0());

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  // The index may be constant (Address) or a register (BaseIndex).
  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  dest.match([&](const auto& dest) {
    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, value, dest, temp);
  });
}
   2374 
void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
  // Atomic 64-bit load: a plain load bracketed by load-ordering barriers.
  Register elements = ToRegister(lir->elements());
  Register64 out = ToOutRegister64(lir);

  Scalar::Type storageType = lir->mir()->storageType();

  // The index may be constant (Address) or a register (BaseIndex).
  auto source = ToAddressOrBaseIndex(elements, lir->index(), storageType);

  auto sync = Synchronization::Load();
  masm.memoryBarrierBefore(sync);
  source.match([&](const auto& source) { masm.load64(source, out); });
  masm.memoryBarrierAfter(sync);
}
   2388 
void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
  // Atomic 64-bit store: a plain store bracketed by store-ordering barriers.
  Register elements = ToRegister(lir->elements());
  Register64 value = ToRegister64(lir->value());

  Scalar::Type writeType = lir->mir()->writeType();

  // The index may be constant (Address) or a register (BaseIndex).
  auto dest = ToAddressOrBaseIndex(elements, lir->index(), writeType);

  auto sync = Synchronization::Store();
  masm.memoryBarrierBefore(sync);
  dest.match([&](const auto& dest) { masm.store64(value, dest); });
  masm.memoryBarrierAfter(sync);
}
   2402 
void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
  // Wasm 64-bit atomic compare-exchange on linear memory; the previous
  // value is produced in the output register pair.
  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptr = ToRegister(lir->ptr());
  Register64 oldValue = ToRegister64(lir->oldValue());
  Register64 newValue = ToRegister64(lir->newValue());
  Register64 output = ToOutRegister64(lir);
  uint32_t offset = lir->mir()->access().offset32();

  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
  masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
                             output);
}
   2415 
   2416 void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
   2417  Register memoryBase = ToRegister(lir->memoryBase());
   2418  Register ptr = ToRegister(lir->ptr());
   2419  Register64 value = ToRegister64(lir->value());
   2420  Register64 output = ToOutRegister64(lir);
   2421  uint32_t offset = lir->mir()->access().offset32();
   2422 
   2423  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
   2424  masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
   2425 }
   2426 
void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
  // Wasm 64-bit atomic read-modify-write on linear memory; the previous
  // value is produced in the output register.
  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptr = ToRegister(lir->ptr());
  Register64 value = ToRegister64(lir->value());
  Register64 output = ToOutRegister64(lir);
  Register64 temp = ToRegister64(lir->temp0());
  uint32_t offset = lir->mir()->access().offset32();

  BaseIndex addr(memoryBase, ptr, TimesOne, offset);

  masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
                           addr, temp, output);
}
   2440 
// Wasm SIMD is not implemented on riscv64; lowering should never create
// these LIR nodes on this target, so each visitor is a hard failure.
void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }

void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128WithConstant(
    LWasmBinarySimd128WithConstant* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmVariableShiftSimd128(
    LWasmVariableShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmConstantShiftSimd128(
    LWasmConstantShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmSignReplicationSimd128(
    LWasmSignReplicationSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
    LWasmReplaceInt64LaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceAndBranchSimd128(
    LWasmReduceAndBranchSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128ToInt64(
    LWasmReduceSimd128ToInt64* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}