tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

CodeGenerator-arm.cpp (89030B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/arm/CodeGenerator-arm.h"
      8 
      9 #include "mozilla/DebugOnly.h"
     10 #include "mozilla/MathAlgorithms.h"
     11 #include "mozilla/Maybe.h"
     12 
     13 #include "jsnum.h"
     14 
     15 #include "jit/CodeGenerator.h"
     16 #include "jit/InlineScriptTree.h"
     17 #include "jit/JitRuntime.h"
     18 #include "jit/MIR-wasm.h"
     19 #include "jit/MIR.h"
     20 #include "jit/MIRGraph.h"
     21 #include "js/Conversions.h"
     22 #include "js/ScalarType.h"  // js::Scalar::Type
     23 #include "vm/JSContext.h"
     24 #include "vm/Realm.h"
     25 #include "vm/Shape.h"
     26 
     27 #include "jit/MacroAssembler-inl.h"
     28 #include "jit/shared/CodeGenerator-shared-inl.h"
     29 #include "vm/JSScript-inl.h"
     30 
     31 using namespace js;
     32 using namespace js::jit;
     33 
     34 using JS::GenericNaN;
     35 using JS::ToInt32;
     36 using mozilla::DebugOnly;
     37 using mozilla::FloorLog2;
     38 using mozilla::NegativeInfinity;
     39 
     40 // shared
// ARM code generator constructor; simply forwards all state to the
// platform-independent CodeGeneratorShared base. No ARM-specific members
// need initialization here.
CodeGeneratorARM::CodeGeneratorARM(MIRGenerator* gen, LIRGraph* graph,
                                   MacroAssembler* masm,
                                   const wasm::CodeMetadata* wasmCodeMeta)
    : CodeGeneratorShared(gen, graph, masm, wasmCodeMeta) {}
     45 
     46 void CodeGeneratorARM::emitBranch(Assembler::Condition cond,
     47                                  MBasicBlock* mirTrue, MBasicBlock* mirFalse) {
     48  if (isNextBlock(mirFalse->lir())) {
     49    jumpToBlock(mirTrue, cond);
     50  } else {
     51    jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
     52    jumpToBlock(mirTrue);
     53  }
     54 }
     55 
// Generate all out-of-line code paths, then append the shared bailout tail.
// Every non-table-based bailout branches to |deoptLabel_|; the tail pushes
// the frame size (so the generic handler can recover the IonScript) and
// jumps to the generic bailout trampoline. Returns false on OOM.
bool CodeGeneratorARM::generateOutOfLineCode() {
  if (!CodeGeneratorShared::generateOutOfLineCode()) {
    return false;
  }

  if (deoptLabel_.used()) {
    // All non-table-based bailouts will go here.
    masm.bind(&deoptLabel_);

    // Push the frame size, so the handler can recover the IonScript.
    masm.push(Imm32(frameSize()));

    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
    masm.jump(handler);
  }

  return !masm.oom();
}
     74 
// Out-of-line bailout stub body: push the snapshot offset identifying the
// machine state to restore, then branch to the shared deopt tail bound in
// generateOutOfLineCode().
void CodeGeneratorARM::emitBailoutOOL(LSnapshot* snapshot) {
  masm.push(Imm32(snapshot->snapshotOffset()));
  masm.ma_b(&deoptLabel_);
}
     79 
// Emit a conditional bailout: when |condition| holds, control branches to a
// freshly allocated out-of-line stub that records the snapshot and jumps to
// the shared deopt tail.
void CodeGeneratorARM::bailoutIf(Assembler::Condition condition,
                                 LSnapshot* snapshot) {
  // Assign the snapshot its offset in the snapshot buffer.
  encode(snapshot);

  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  auto* ool = new (alloc()) LambdaOutOfLineCode(
      [=, this](OutOfLineCode& ool) { emitBailoutOOL(snapshot); });

  // All bailout code is associated with the bytecodeSite of the block we are
  // bailing out from.
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.ma_b(ool->entry(), condition);
}
     95 
// Retarget an existing (used, not-yet-bound) |label| at an out-of-line
// bailout stub for |snapshot|. Used when earlier code has already emitted
// branches to |label| and they should all funnel into the bailout path.
void CodeGeneratorARM::bailoutFrom(Label* label, LSnapshot* snapshot) {
  // The label must have pending branches but no binding yet (unless we
  // already hit OOM, in which case label state is unreliable).
  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  encode(snapshot);

  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  auto* ool = new (alloc()) LambdaOutOfLineCode(
      [=, this](OutOfLineCode& ool) { emitBailoutOOL(snapshot); });

  // All bailout code is associated with the bytecodeSite of the block we are
  // bailing out from.
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.retarget(label, ool->entry());
}
    113 
// Unconditional bailout: emit a jump through a fresh label and immediately
// retarget that label at an out-of-line bailout stub for |snapshot|.
void CodeGeneratorARM::bailout(LSnapshot* snapshot) {
  Label label;
  masm.ma_b(&label);
  bailoutFrom(&label, snapshot);
}
    119 
    120 void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
    121  FloatRegister first = ToFloatRegister(ins->first());
    122  FloatRegister second = ToFloatRegister(ins->second());
    123 
    124  MOZ_ASSERT(first == ToFloatRegister(ins->output()));
    125 
    126  if (ins->mir()->isMax()) {
    127    masm.maxDouble(second, first, true);
    128  } else {
    129    masm.minDouble(second, first, true);
    130  }
    131 }
    132 
    133 void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
    134  FloatRegister first = ToFloatRegister(ins->first());
    135  FloatRegister second = ToFloatRegister(ins->second());
    136 
    137  MOZ_ASSERT(first == ToFloatRegister(ins->output()));
    138 
    139  if (ins->mir()->isMax()) {
    140    masm.maxFloat32(second, first, true);
    141  } else {
    142    masm.minFloat32(second, first, true);
    143  }
    144 }
    145 
    146 void CodeGenerator::visitAddI(LAddI* ins) {
    147  const LAllocation* lhs = ins->lhs();
    148  const LAllocation* rhs = ins->rhs();
    149  const LDefinition* dest = ins->output();
    150 
    151  ScratchRegisterScope scratch(masm);
    152 
    153  if (rhs->isConstant()) {
    154    masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch,
    155                SetCC);
    156  } else if (rhs->isGeneralReg()) {
    157    masm.ma_add(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), SetCC);
    158  } else {
    159    masm.ma_add(ToRegister(lhs), Operand(ToAddress(rhs)), ToRegister(dest),
    160                SetCC);
    161  }
    162 
    163  if (ins->snapshot()) {
    164    bailoutIf(Assembler::Overflow, ins->snapshot());
    165  }
    166 }
    167 
    168 void CodeGenerator::visitAddIntPtr(LAddIntPtr* ins) {
    169  const LAllocation* lhs = ins->lhs();
    170  const LAllocation* rhs = ins->rhs();
    171  const LDefinition* dest = ins->output();
    172 
    173  ScratchRegisterScope scratch(masm);
    174 
    175  if (rhs->isConstant()) {
    176    masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch,
    177                LeaveCC);
    178  } else if (rhs->isGeneralReg()) {
    179    masm.ma_add(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), LeaveCC);
    180  } else {
    181    masm.ma_add(ToRegister(lhs), Operand(ToAddress(rhs)), ToRegister(dest),
    182                LeaveCC);
    183  }
    184 }
    185 
    186 void CodeGenerator::visitAddI64(LAddI64* lir) {
    187  LInt64Allocation lhs = lir->lhs();
    188  LInt64Allocation rhs = lir->rhs();
    189 
    190  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
    191 
    192  if (IsConstant(rhs)) {
    193    masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
    194    return;
    195  }
    196 
    197  masm.add64(ToRegister64(rhs), ToRegister64(lhs));
    198 }
    199 
    200 void CodeGenerator::visitSubI(LSubI* ins) {
    201  const LAllocation* lhs = ins->lhs();
    202  const LAllocation* rhs = ins->rhs();
    203  const LDefinition* dest = ins->output();
    204 
    205  ScratchRegisterScope scratch(masm);
    206 
    207  if (rhs->isConstant()) {
    208    masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch,
    209                SetCC);
    210  } else if (rhs->isGeneralReg()) {
    211    masm.ma_sub(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), SetCC);
    212  } else {
    213    masm.ma_sub(ToRegister(lhs), Operand(ToAddress(rhs)), ToRegister(dest),
    214                SetCC);
    215  }
    216 
    217  if (ins->snapshot()) {
    218    bailoutIf(Assembler::Overflow, ins->snapshot());
    219  }
    220 }
    221 
    222 void CodeGenerator::visitSubIntPtr(LSubIntPtr* ins) {
    223  const LAllocation* lhs = ins->lhs();
    224  const LAllocation* rhs = ins->rhs();
    225  const LDefinition* dest = ins->output();
    226 
    227  ScratchRegisterScope scratch(masm);
    228 
    229  if (rhs->isConstant()) {
    230    masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch,
    231                LeaveCC);
    232  } else if (rhs->isGeneralReg()) {
    233    masm.ma_sub(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), LeaveCC);
    234  } else {
    235    masm.ma_sub(ToRegister(lhs), Operand(ToAddress(rhs)), ToRegister(dest),
    236                LeaveCC);
    237  }
    238 }
    239 
    240 void CodeGenerator::visitSubI64(LSubI64* lir) {
    241  LInt64Allocation lhs = lir->lhs();
    242  LInt64Allocation rhs = lir->rhs();
    243 
    244  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
    245 
    246  if (IsConstant(rhs)) {
    247    masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
    248    return;
    249  }
    250 
    251  masm.sub64(ToRegister64(rhs), ToRegister64(lhs));
    252 }
    253 
// 32-bit integer multiplication with optional overflow and negative-zero
// bailouts. For constant right-hand sides the code is strength-reduced:
// small constants get dedicated sequences, positive constants are turned
// into shifts (or shift+add) when possible, and everything else falls back
// to a (checked) multiply. The negative-zero test for constants must run
// BEFORE the multiply since it inspects the lhs value.
void CodeGenerator::visitMulI(LMulI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  const LDefinition* dest = ins->output();
  MMul* mul = ins->mir();
  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  if (rhs->isConstant()) {
    // Bailout when this condition is met.
    Assembler::Condition c = Assembler::Overflow;
    // Bailout on -0.0
    int32_t constant = ToInt32(rhs);
    if (mul->canBeNegativeZero() && constant <= 0) {
      // constant == 0: any negative lhs gives -0. constant < 0: only
      // lhs == 0 gives -0 (result sign would otherwise flip).
      Assembler::Condition bailoutCond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      masm.as_cmp(ToRegister(lhs), Imm8(0));
      bailoutIf(bailoutCond, ins->snapshot());
    }
    // TODO: move these to ma_mul.
    switch (constant) {
      case -1:
        // Negate via reverse-subtract from zero; SetCC catches INT32_MIN.
        masm.as_rsb(ToRegister(dest), ToRegister(lhs), Imm8(0), SetCC);
        break;
      case 0:
        masm.ma_mov(Imm32(0), ToRegister(dest));
        return;  // Escape overflow check;
      case 1:
        // Nop
        masm.ma_mov(ToRegister(lhs), ToRegister(dest));
        return;  // Escape overflow check;
      case 2:
        masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest), SetCC);
        // Overflow is handled later.
        break;
      default: {
        bool handled = false;
        if (constant > 0) {
          // Try shift and add sequences for a positive constant.
          if (!mul->canOverflow()) {
            // If it cannot overflow, we can do lots of optimizations.
            Register src = ToRegister(lhs);
            uint32_t shift = FloorLog2(constant);
            uint32_t rest = constant - (1 << shift);
            // See if the constant has one bit set, meaning it can be
            // encoded as a bitshift.
            if ((1 << shift) == constant) {
              masm.ma_lsl(Imm32(shift), src, ToRegister(dest));
              handled = true;
            } else {
              // If the constant cannot be encoded as (1 << C1), see
              // if it can be encoded as (1 << C1) | (1 << C2), which
              // can be computed using an add and a shift.
              uint32_t shift_rest = FloorLog2(rest);
              if ((1u << shift_rest) == rest) {
                masm.as_add(ToRegister(dest), src,
                            lsl(src, shift - shift_rest));
                if (shift_rest != 0) {
                  masm.ma_lsl(Imm32(shift_rest), ToRegister(dest),
                              ToRegister(dest));
                }
                handled = true;
              }
            }
          } else if (ToRegister(lhs) != ToRegister(dest)) {
            // To stay on the safe side, only optimize things that are a
            // power of 2.

            uint32_t shift = FloorLog2(constant);
            if ((1 << shift) == constant) {
              // dest = lhs * pow(2,shift)
              masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
              // At runtime, check (lhs == dest >> shift), if this
              // does not hold, some bits were lost due to overflow,
              // and the computation should be resumed as a double.
              masm.as_cmp(ToRegister(lhs), asr(ToRegister(dest), shift));
              c = Assembler::NotEqual;
              handled = true;
            }
          }
        }

        if (!handled) {
          ScratchRegisterScope scratch(masm);
          if (mul->canOverflow()) {
            // ma_check_mul returns the condition that signals overflow.
            c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)),
                                  ToRegister(dest), scratch, c);
          } else {
            masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest),
                        scratch);
          }
        }
      }
    }
    // Bailout on overflow.
    if (mul->canOverflow()) {
      bailoutIf(c, ins->snapshot());
    }
  } else {
    Assembler::Condition c = Assembler::Overflow;

    if (mul->canOverflow()) {
      ScratchRegisterScope scratch(masm);
      c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest),
                            scratch, c);
    } else {
      masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));
    }

    // Bailout on overflow.
    if (mul->canOverflow()) {
      bailoutIf(c, ins->snapshot());
    }

    if (mul->canBeNegativeZero()) {
      // A zero result with either operand negative means the true result
      // is -0; bail out so it can be recomputed as a double.
      Label done;
      masm.as_cmp(ToRegister(dest), Imm8(0));
      masm.ma_b(&done, Assembler::NotEqual);

      // Result is -0 if lhs or rhs is negative.
      masm.ma_cmn(ToRegister(lhs), ToRegister(rhs));
      bailoutIf(Assembler::Signed, ins->snapshot());

      masm.bind(&done);
    }
  }
}
    381 
// Pointer-sized (32-bit on ARM) multiplication. Infallible: no overflow or
// negative-zero checks. Constant right-hand sides are strength-reduced
// (negate / clear / move / add / shift) before falling back to ma_mul.
void CodeGenerator::visitMulIntPtr(LMulIntPtr* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  const LDefinition* dest = ins->output();

  if (rhs->isConstant()) {
    intptr_t constant = ToIntPtr(rhs);

    switch (constant) {
      case -1:
        // Negate via reverse-subtract from zero.
        masm.as_rsb(ToRegister(dest), ToRegister(lhs), Imm8(0), LeaveCC);
        return;
      case 0:
        masm.ma_mov(Imm32(0), ToRegister(dest));
        return;
      case 1:
        masm.ma_mov(ToRegister(lhs), ToRegister(dest));
        return;
      case 2:
        masm.ma_add(ToRegister(lhs), ToRegister(lhs), ToRegister(dest),
                    LeaveCC);
        return;
    }

    // Use shift if constant is a power of 2.
    if (constant > 0 && mozilla::IsPowerOfTwo(uintptr_t(constant))) {
      uint32_t shift = mozilla::FloorLog2(constant);
      masm.ma_lsl(Imm32(shift), ToRegister(lhs), ToRegister(dest));
      return;
    }

    // General case: multiply by the (32-bit) immediate.
    ScratchRegisterScope scratch(masm);
    masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest),
                scratch);
  } else {
    masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));
  }
}
    420 
// 64-bit multiplication on register pairs. The lhs pair doubles as the
// output. Constant right-hand sides are strength-reduced (negate / clear /
// nop / double / shift for positive powers of two) before falling back to
// the general mul64, which may need a temp register.
void CodeGenerator::visitMulI64(LMulI64* lir) {
  LInt64Allocation lhs = lir->lhs();
  LInt64Allocation rhs = lir->rhs();

  MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));

  if (IsConstant(rhs)) {
    int64_t constant = ToInt64(rhs);
    switch (constant) {
      case -1:
        masm.neg64(ToRegister64(lhs));
        return;
      case 0:
        masm.move64(Imm64(0), ToRegister64(lhs));
        return;
      case 1:
        // nop
        return;
      case 2:
        masm.add64(ToRegister64(lhs), ToRegister64(lhs));
        return;
      default:
        if (constant > 0) {
          // Use shift if constant is power of 2.
          int32_t shift = mozilla::FloorLog2(constant);
          if (int64_t(1) << shift == constant) {
            masm.lshift64(Imm32(shift), ToRegister64(lhs));
            return;
          }
        }
        Register temp = ToTempRegisterOrInvalid(lir->temp0());
        masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
    }
  } else {
    Register temp = ToTempRegisterOrInvalid(lir->temp0());
    masm.mul64(ToRegister64(rhs), ToRegister64(lhs), temp);
  }
}
    459 
// Emit the guard sequence shared by the hardware (SDIV) and software
// (__aeabi_idivmod) division paths: INT32_MIN / -1 overflow, division by
// zero, and negative zero. Truncating cases that can produce their result
// directly store it into |output| and jump to |done|; fallible cases bail
// out instead. Note the chained conditional compares: the second ma_cmp is
// predicated on Equal so EQ survives only if both operands matched.
void CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs,
                                  Register output, LSnapshot* snapshot,
                                  Label& done) {
  ScratchRegisterScope scratch(masm);

  if (mir->canBeNegativeOverflow()) {
    // Handle INT32_MIN / -1;
    // The integer division will give INT32_MIN, but we want -(double)INT32_MIN.

    // Sets EQ if lhs == INT32_MIN.
    masm.ma_cmp(lhs, Imm32(INT32_MIN), scratch);
    // If EQ (LHS == INT32_MIN), sets EQ if rhs == -1.
    masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
    if (mir->canTruncateOverflow()) {
      if (mir->trapOnError()) {
        // Wasm semantics: overflow traps.
        Label ok;
        masm.ma_b(&ok, Assembler::NotEqual);
        masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->trapSiteDesc());
        masm.bind(&ok);
      } else {
        // (-INT32_MIN)|0 = INT32_MIN
        Label skip;
        masm.ma_b(&skip, Assembler::NotEqual);
        masm.ma_mov(Imm32(INT32_MIN), output);
        masm.ma_b(&done);
        masm.bind(&skip);
      }
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Equal, snapshot);
    }
  }

  // Handle divide by zero.
  if (mir->canBeDivideByZero()) {
    masm.as_cmp(rhs, Imm8(0));
    if (mir->canTruncateInfinities()) {
      if (mir->trapOnError()) {
        // Wasm semantics: division by zero traps.
        Label nonZero;
        masm.ma_b(&nonZero, Assembler::NotEqual);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
        masm.bind(&nonZero);
      } else {
        // Infinity|0 == 0
        Label skip;
        masm.ma_b(&skip, Assembler::NotEqual);
        masm.ma_mov(Imm32(0), output);
        masm.ma_b(&done);
        masm.bind(&skip);
      }
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Equal, snapshot);
    }
  }

  // Handle negative 0.
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    // 0 / negative = -0; bail out so the result can be a double.
    Label nonzero;
    masm.as_cmp(lhs, Imm8(0));
    masm.ma_b(&nonzero, Assembler::NotEqual);
    masm.as_cmp(rhs, Imm8(0));
    MOZ_ASSERT(mir->fallible());
    bailoutIf(Assembler::LessThan, snapshot);
    masm.bind(&nonzero);
  }
}
    527 
// Integer division using the hardware SDIV instruction. When the remainder
// cannot be truncated (the result must be exact), multiply the quotient
// back and bail out if it does not reproduce the dividend.
void CodeGenerator::visitDivI(LDivI* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register temp = ToRegister(ins->temp0());
  Register output = ToRegister(ins->output());
  MDiv* mir = ins->mir();

  Label done;
  // Shared guards: INT32_MIN / -1, divide-by-zero, negative zero.
  divICommon(mir, lhs, rhs, output, ins->snapshot(), done);

  if (mir->canTruncateRemainder()) {
    masm.ma_sdiv(lhs, rhs, output);
  } else {
    {
      // Check that lhs == (lhs / rhs) * rhs, i.e. the division was exact.
      ScratchRegisterScope scratch(masm);
      masm.ma_sdiv(lhs, rhs, temp);
      masm.ma_mul(temp, rhs, scratch);
      masm.ma_cmp(lhs, scratch);
    }
    bailoutIf(Assembler::NotEqual, ins->snapshot());
    masm.ma_mov(temp, output);
  }

  masm.bind(&done);
}
    553 
// ARM EABI runtime helpers for targets without hardware integer division.
// Per the calling convention used below, they return the quotient in r0 and
// the remainder in r1 (declared here as a packed int64_t return).
extern "C" {
extern MOZ_EXPORT int64_t __aeabi_idivmod(int, int);
extern MOZ_EXPORT int64_t __aeabi_uidivmod(int, int);
}
    558 
// Integer division via the __aeabi_idivmod runtime call, for CPUs without
// SDIV. Wasm code routes the call through the instance-preserving wasm ABI;
// JS code uses a plain aligned ABI call.
void CodeGenerator::visitSoftDivI(LSoftDivI* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  MDiv* mir = ins->mir();

  Label done;
  // Shared guards: INT32_MIN / -1, divide-by-zero, negative zero.
  divICommon(mir, lhs, rhs, output, ins->snapshot(), done);

  if (gen->compilingWasm()) {
    // Preserve the instance register across the builtin call.
    masm.Push(InstanceReg);
    int32_t framePushedAfterInstance = masm.framePushed();
    masm.setupWasmABICall(wasm::SymbolicAddress::aeabi_idivmod);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
    masm.callWithABI(mir->trapSiteDesc().bytecodeOffset,
                     wasm::SymbolicAddress::aeabi_idivmod,
                     mozilla::Some(instanceOffset));
    masm.Pop(InstanceReg);
  } else {
    using Fn = int64_t (*)(int, int);
    masm.setupAlignedABICall();
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    masm.callWithABI<Fn, __aeabi_idivmod>(
        ABIType::Int64, CheckUnsafeCallWithABI::DontCheckOther);
  }

  // idivmod returns the quotient in r0, and the remainder in r1.
  if (!mir->canTruncateRemainder()) {
    // The result must be exact: a non-zero remainder forces a bailout.
    MOZ_ASSERT(mir->fallible());
    masm.as_cmp(r1, Imm8(0));
    bailoutIf(Assembler::NonZero, ins->snapshot());
  }

  masm.bind(&done);
}
    597 
// Division by a known power of two, implemented as an arithmetic shift.
// Non-truncated divisions first verify the low |shift| bits of the
// numerator are zero (exact division) and bail out otherwise. Negative
// dividends need a rounding adjustment before the shift.
void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  MDiv* mir = ins->mir();
  Register lhs = ToRegister(ins->numerator());
  Register output = ToRegister(ins->output());
  int32_t shift = ins->shift();

  if (shift == 0) {
    // Dividing by 1: just copy the numerator.
    masm.ma_mov(lhs, output);
    return;
  }

  if (!mir->isTruncated()) {
    // If the remainder is != 0, bailout since this must be a double.
    {
      // The bailout code also needs the scratch register.
      // Here it is only used as a dummy target to set CC flags.
      ScratchRegisterScope scratch(masm);
      masm.as_mov(scratch, lsl(lhs, 32 - shift), SetCC);
    }
    bailoutIf(Assembler::NonZero, ins->snapshot());
  }

  if (!mir->canBeNegativeDividend()) {
    // Numerator is unsigned, so needs no adjusting. Do the shift.
    masm.as_mov(output, asr(lhs, shift));
    return;
  }

  // Adjust the value so that shifting produces a correctly rounded result
  // when the numerator is negative. See 10-1 "Signed Division by a Known
  // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
  ScratchRegisterScope scratch(masm);

  if (shift > 1) {
    // scratch = lhs + ((lhs >> 31) >>> (32 - shift))
    masm.as_mov(scratch, asr(lhs, 31));
    masm.as_add(scratch, lhs, lsr(scratch, 32 - shift));
  } else {
    // shift == 1: the sign-extension and logical shift collapse into one.
    masm.as_add(scratch, lhs, lsr(lhs, 32 - shift));
  }

  // Do the shift.
  masm.as_mov(output, asr(scratch, shift));
}
    641 
// Emit the divide-by-zero guard shared by the hardware and software modulo
// paths. Truncated mods produce 0 (or trap, for wasm) and jump to |done|;
// fallible mods bail out.
void CodeGeneratorARM::modICommon(MMod* mir, Register lhs, Register rhs,
                                  Register output, LSnapshot* snapshot,
                                  Label& done) {
  // X % 0 is bad because it will give garbage (or abort), when it should give
  // NaN.

  if (mir->canBeDivideByZero()) {
    masm.as_cmp(rhs, Imm8(0));
    if (mir->isTruncated()) {
      Label nonZero;
      masm.ma_b(&nonZero, Assembler::NotEqual);
      if (mir->trapOnError()) {
        // Wasm semantics: modulo by zero traps.
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
      } else {
        // NaN|0 == 0
        masm.ma_mov(Imm32(0), output);
        masm.ma_b(&done);
      }
      masm.bind(&nonZero);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Equal, snapshot);
    }
  }
}
    667 
// Integer modulo using SDIV + MLS (ma_smod). Bails out when a zero result
// with a negative dividend would need to be -0.
void CodeGenerator::visitModI(LModI* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  MMod* mir = ins->mir();

  // Contrary to other architectures (notably x86) INT_MIN % -1 doesn't need to
  // be handled separately. |ma_smod| computes the remainder using the |SDIV|
  // and the |MLS| instructions. On overflow, |SDIV| truncates the result to
  // 32-bit and returns INT_MIN, see ARM Architecture Reference Manual, SDIV
  // instruction.
  //
  //   mls(INT_MIN, sdiv(INT_MIN, -1), -1)
  // = INT_MIN - (sdiv(INT_MIN, -1) * -1)
  // = INT_MIN - (INT_MIN * -1)
  // = INT_MIN - INT_MIN
  // = 0
  //
  // And a zero remainder with a negative dividend is already handled below.

  Label done;
  // Shared guard: divide-by-zero.
  modICommon(mir, lhs, rhs, output, ins->snapshot(), done);

  {
    ScratchRegisterScope scratch(masm);
    masm.ma_smod(lhs, rhs, output, scratch);
  }

  // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
  if (mir->canBeNegativeDividend()) {
    if (mir->isTruncated()) {
      // -0.0|0 == 0
    } else {
      MOZ_ASSERT(mir->fallible());
      // See if X < 0
      masm.as_cmp(output, Imm8(0));
      masm.ma_b(&done, Assembler::NotEqual);
      masm.as_cmp(lhs, Imm8(0));
      bailoutIf(Assembler::Signed, ins->snapshot());
    }
  }

  masm.bind(&done);
}
    712 
// Integer modulo via the __aeabi_idivmod runtime call, for CPUs without
// SDIV. The lhs is stashed in |callTemp| before the call so the
// negative-zero check can still see it afterwards. INT_MIN % -1 is handled
// up front because the AEABI helper's result is unspecified for it.
void CodeGenerator::visitSoftModI(LSoftModI* ins) {
  // Extract the registers from this instruction.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Register callTemp = ToRegister(ins->temp0());
  MMod* mir = ins->mir();
  Label done;

  // Save the lhs in case we end up with a 0 that should be a -0.0 because lhs <
  // 0.
  MOZ_ASSERT(callTemp != lhs);
  MOZ_ASSERT(callTemp != rhs);
  masm.ma_mov(lhs, callTemp);

  // Prevent INT_MIN % -1.
  //
  // |aeabi_idivmod| is allowed to return any arbitrary value when called with
  // |(INT_MIN, -1)|, see "Run-time ABI for the ARM architecture manual". Most
  // implementations perform a non-trapping signed integer division and
  // return the expected result, i.e. INT_MIN. But since we can't rely on this
  // behavior, handle this case separately here.
  if (mir->canBeNegativeDividend()) {
    {
      ScratchRegisterScope scratch(masm);
      // Sets EQ if lhs == INT_MIN
      masm.ma_cmp(lhs, Imm32(INT_MIN), scratch);
      // If EQ (LHS == INT_MIN), sets EQ if rhs == -1
      masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
    }
    if (mir->isTruncated()) {
      // (INT_MIN % -1)|0 == 0
      Label skip;
      masm.ma_b(&skip, Assembler::NotEqual);
      masm.ma_mov(Imm32(0), output);
      masm.ma_b(&done);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Equal, ins->snapshot());
    }
  }

  // Shared guard: divide-by-zero.
  modICommon(mir, lhs, rhs, output, ins->snapshot(), done);

  if (gen->compilingWasm()) {
    // Preserve the instance register across the builtin call.
    masm.Push(InstanceReg);
    int32_t framePushedAfterInstance = masm.framePushed();
    masm.setupWasmABICall(wasm::SymbolicAddress::aeabi_idivmod);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
    masm.callWithABI(mir->trapSiteDesc().bytecodeOffset,
                     wasm::SymbolicAddress::aeabi_idivmod,
                     mozilla::Some(instanceOffset));
    masm.Pop(InstanceReg);
  } else {
    using Fn = int64_t (*)(int, int);
    masm.setupAlignedABICall();
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    masm.callWithABI<Fn, __aeabi_idivmod>(
        ABIType::Int64, CheckUnsafeCallWithABI::DontCheckOther);
  }

  // The remainder comes back in r1; copy it to the output register.
  MOZ_ASSERT(r1 != output);
  masm.move32(r1, output);

  // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
  if (mir->canBeNegativeDividend()) {
    if (mir->isTruncated()) {
      // -0.0|0 == 0
    } else {
      MOZ_ASSERT(mir->fallible());
      // See if X < 0
      masm.as_cmp(output, Imm8(0));
      masm.ma_b(&done, Assembler::NotEqual);
      masm.as_cmp(callTemp, Imm8(0));
      bailoutIf(Assembler::Signed, ins->snapshot());
    }
  }

  masm.bind(&done);
}
    797 
// Modulo by a known power of two: mask the low bits. A negative input is
// negated before the mask and negated back afterwards so the result keeps
// the dividend's sign; a zero result from a negative dividend bails out
// (it would be -0) unless truncation is allowed.
void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
  Register in = ToRegister(ins->input());
  Register out = ToRegister(ins->output());
  MMod* mir = ins->mir();
  Label fin;
  // bug 739870, jbramley has a different sequence that may help with speed
  // here.

  masm.ma_mov(in, out, SetCC);
  masm.ma_b(&fin, Assembler::Zero);
  // Negate negative inputs (predicated on the Signed flag from the mov).
  masm.as_rsb(out, out, Imm8(0), LeaveCC, Assembler::Signed);
  {
    ScratchRegisterScope scratch(masm);
    masm.ma_and(Imm32((1 << ins->shift()) - 1), out, scratch);
  }
  // Restore the sign (still predicated on the original Signed flag).
  masm.as_rsb(out, out, Imm8(0), SetCC, Assembler::Signed);
  if (mir->canBeNegativeDividend()) {
    if (!mir->isTruncated()) {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Zero, ins->snapshot());
    } else {
      // -0|0 == 0
    }
  }
  masm.bind(&fin);
}
    824 
// Modulo by a power of two via the ma_mod_mask helper, which needs two
// temporaries plus both scratch registers. Bails out when a zero result
// from a negative dividend would need to be -0.
void CodeGenerator::visitModMaskI(LModMaskI* ins) {
  Register src = ToRegister(ins->input());
  Register dest = ToRegister(ins->output());
  Register tmp1 = ToRegister(ins->temp0());
  Register tmp2 = ToRegister(ins->temp1());
  MMod* mir = ins->mir();

  ScratchRegisterScope scratch(masm);
  SecondScratchRegisterScope scratch2(masm);

  masm.ma_mod_mask(src, dest, tmp1, tmp2, scratch, scratch2, ins->shift());

  if (mir->canBeNegativeDividend()) {
    if (!mir->isTruncated()) {
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Zero, ins->snapshot());
    } else {
      // -0|0 == 0
    }
  }
}
    846 
// Signed pointer-sized (32-bit on ARM) division for BigInt operations:
// output = dividend / divisor.
void CodeGeneratorARM::emitBigIntPtrDiv(LBigIntPtrDiv* ins, Register dividend,
                                        Register divisor, Register output) {
  // Callers handle division by zero and integer overflow.

  // With a hardware divider, a single sdiv suffices.
  if (ARMFlags::HasIDIV()) {
    masm.ma_sdiv(dividend, divisor, /* result= */ output);
    return;
  }

  // No hardware divider: call the EABI runtime helper.
  // idivmod returns the quotient in r0, and the remainder in r1.
  MOZ_ASSERT(ToRegister(ins->temp0()) == r0);
  MOZ_ASSERT(ToRegister(ins->temp1()) == r1);

  // Preserve live volatile registers around the call; output is excluded
  // since it is overwritten below anyway.
  LiveRegisterSet volatileRegs = liveVolatileRegs(ins);
  volatileRegs.takeUnchecked(output);

  masm.PushRegsInMask(volatileRegs);

  using Fn = int64_t (*)(int, int);
  masm.setupUnalignedABICall(output);
  masm.passABIArg(dividend);
  masm.passABIArg(divisor);
  masm.callWithABI<Fn, __aeabi_idivmod>(ABIType::Int64,
                                        CheckUnsafeCallWithABI::DontCheckOther);
  // Grab the quotient before the pop below restores r0.
  masm.move32(r0, output);

  masm.PopRegsInMask(volatileRegs);
}
    875 
// Signed pointer-sized (32-bit on ARM) remainder for BigInt operations:
// output = dividend % divisor.
void CodeGeneratorARM::emitBigIntPtrMod(LBigIntPtrMod* ins, Register dividend,
                                        Register divisor, Register output) {
  // Callers handle division by zero and integer overflow.

  // With a hardware divider, compute the remainder as
  // dividend - (dividend / divisor) * divisor.
  if (ARMFlags::HasIDIV()) {
    ScratchRegisterScope scratch(masm);
    masm.ma_smod(dividend, divisor, /* result= */ output, scratch);
    return;
  }

  // No hardware divider: call the EABI runtime helper.
  // idivmod returns the quotient in r0, and the remainder in r1.
  MOZ_ASSERT(ToRegister(ins->temp0()) == r0);
  MOZ_ASSERT(ToRegister(ins->temp1()) == r1);

  // Preserve live volatile registers around the call; output is excluded
  // since it is overwritten below anyway.
  LiveRegisterSet volatileRegs = liveVolatileRegs(ins);
  volatileRegs.takeUnchecked(output);

  masm.PushRegsInMask(volatileRegs);

  using Fn = int64_t (*)(int, int);
  masm.setupUnalignedABICall(output);
  masm.passABIArg(dividend);
  masm.passABIArg(divisor);
  masm.callWithABI<Fn, __aeabi_idivmod>(ABIType::Int64,
                                        CheckUnsafeCallWithABI::DontCheckOther);
  // Grab the remainder before the pop below restores r1.
  masm.move32(r1, output);

  masm.PopRegsInMask(volatileRegs);
}
    905 
// Bitwise NOT of an int32: dest = ~input.
void CodeGenerator::visitBitNotI(LBitNotI* ins) {
  const LAllocation* input = ins->input();
  const LDefinition* dest = ins->output();
  // Lowering never feeds us a constant input here. (The original comment
  // was garbled; it appears to say that on ARM an mvn with an imm8m-encoded
  // immediate could handle a wider range of constants — TODO confirm against
  // the ARM lowering.)
  MOZ_ASSERT(!input->isConstant());

  masm.ma_mvn(ToRegister(input), ToRegister(dest));
}
    915 
    916 void CodeGenerator::visitBitOpI(LBitOpI* ins) {
    917  const LAllocation* lhs = ins->lhs();
    918  const LAllocation* rhs = ins->rhs();
    919  const LDefinition* dest = ins->output();
    920 
    921  ScratchRegisterScope scratch(masm);
    922 
    923  // All of these bitops should be either imm32's, or integer registers.
    924  switch (ins->bitop()) {
    925    case JSOp::BitOr:
    926      if (rhs->isConstant()) {
    927        masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest),
    928                    scratch);
    929      } else {
    930        masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
    931      }
    932      break;
    933    case JSOp::BitXor:
    934      if (rhs->isConstant()) {
    935        masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest),
    936                    scratch);
    937      } else {
    938        masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
    939      }
    940      break;
    941    case JSOp::BitAnd:
    942      if (rhs->isConstant()) {
    943        masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest),
    944                    scratch);
    945      } else {
    946        masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
    947      }
    948      break;
    949    default:
    950      MOZ_CRASH("unexpected binary opcode");
    951  }
    952 }
    953 
// Int32 shift (<<, >>, >>>). JS masks the shift amount to 0-31. An
// unsigned shift whose result does not fit in a signed int32 bails out
// when the MIR node is fallible.
void CodeGenerator::visitShiftI(LShiftI* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    // JS semantics: only the low five bits of the shift count matter.
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.ma_lsl(Imm32(shift), lhs, dest);
        } else {
          // Shift-by-zero still needs the move into dest.
          masm.ma_mov(lhs, dest);
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.ma_asr(Imm32(shift), lhs, dest);
        } else {
          masm.ma_mov(lhs, dest);
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.ma_lsr(Imm32(shift), lhs, dest);
        } else {
          // x >>> 0 can overflow.
          masm.ma_mov(lhs, dest);
          if (ins->mir()->toUrsh()->fallible()) {
            // A negative int32 reinterpreted as uint32 exceeds INT32_MAX.
            masm.as_cmp(dest, Imm8(0));
            bailoutIf(Assembler::LessThan, ins->snapshot());
          }
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    // The shift amounts should be AND'ed into the 0-31 range since arm
    // shifts by the lower byte of the register (it will attempt to shift by
    // 250 if you ask it to).
    ScratchRegisterScope scratch(masm);
    masm.as_and(scratch, ToRegister(rhs), Imm8(0x1F));

    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.ma_lsl(scratch, lhs, dest);
        break;
      case JSOp::Rsh:
        masm.ma_asr(scratch, lhs, dest);
        break;
      case JSOp::Ursh:
        masm.ma_lsr(scratch, lhs, dest);
        if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          masm.as_cmp(dest, Imm8(0));
          bailoutIf(Assembler::LessThan, ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}
   1018 
// Pointer-sized (32-bit on ARM) shift. Unlike visitShiftI there is no
// overflow bailout: IntPtr >>> results need not fit in a signed int32.
void CodeGenerator::visitShiftIntPtr(LShiftIntPtr* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    // Only the low five bits of the shift count matter.
    int32_t shift = ToIntPtr(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.ma_lsl(Imm32(shift), lhs, dest);
        } else {
          // Shift-by-zero still needs the move into dest.
          masm.ma_mov(lhs, dest);
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.ma_asr(Imm32(shift), lhs, dest);
        } else {
          masm.ma_mov(lhs, dest);
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.ma_lsr(Imm32(shift), lhs, dest);
        } else {
          masm.ma_mov(lhs, dest);
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    // The shift amounts should be AND'ed into the 0-31 range since arm
    // shifts by the lower byte of the register (it will attempt to shift by
    // 250 if you ask it to).
    // NOTE(review): |dest| doubles as the temp for the masked shift amount,
    // so this assumes lowering never aliases |dest| with |lhs| — confirm in
    // Lowering-arm.
    masm.as_and(dest, ToRegister(rhs), Imm8(0x1F));

    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.ma_lsl(dest, lhs, dest);
        break;
      case JSOp::Rsh:
        masm.ma_asr(dest, lhs, dest);
        break;
      case JSOp::Ursh:
        masm.ma_lsr(dest, lhs, dest);
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}
   1072 
   1073 void CodeGenerator::visitUrshD(LUrshD* ins) {
   1074  Register lhs = ToRegister(ins->lhs());
   1075  Register temp = ToRegister(ins->temp0());
   1076 
   1077  const LAllocation* rhs = ins->rhs();
   1078  FloatRegister out = ToFloatRegister(ins->output());
   1079 
   1080  if (rhs->isConstant()) {
   1081    int32_t shift = ToInt32(rhs) & 0x1F;
   1082    if (shift) {
   1083      masm.ma_lsr(Imm32(shift), lhs, temp);
   1084    } else {
   1085      masm.ma_mov(lhs, temp);
   1086    }
   1087  } else {
   1088    masm.as_and(temp, ToRegister(rhs), Imm8(0x1F));
   1089    masm.ma_lsr(temp, lhs, temp);
   1090  }
   1091 
   1092  masm.convertUInt32ToDouble(temp, out);
   1093 }
   1094 
// Math.pow(x, 0.5) with the two IEEE special cases sqrt() gets wrong.
void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());
  ScratchDoubleScope scratch(masm);

  Label done;

  // Math.pow(-Infinity, 0.5) == Infinity, whereas sqrt(-Infinity) is NaN:
  // handle it by negating -Infinity into the output.
  masm.loadConstantDouble(NegativeInfinity<double>(), scratch);
  masm.compareDouble(input, scratch);
  masm.ma_vneg(scratch, output, Assembler::Equal);
  masm.ma_b(&done, Assembler::Equal);

  // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
  // Adding 0 converts any -0 to 0.
  masm.loadConstantDouble(0.0, scratch);
  masm.ma_vadd(scratch, input, output);
  masm.ma_vsqrt(output, output);

  masm.bind(&done);
}
   1116 
   1117 MoveOperand CodeGeneratorARM::toMoveOperand(LAllocation a) const {
   1118  if (a.isGeneralReg()) {
   1119    return MoveOperand(ToRegister(a));
   1120  }
   1121  if (a.isFloatReg()) {
   1122    return MoveOperand(ToFloatRegister(a));
   1123  }
   1124  MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
   1125                                           : MoveOperand::Kind::Memory;
   1126  Address addr = ToAddress(a);
   1127  MOZ_ASSERT((addr.offset & 3) == 0);
   1128  return MoveOperand(addr, kind);
   1129 }
   1130 
// Out-of-line data for a table switch: holds one CodeLabel per case so the
// absolute case addresses can be patched into the jump table once codegen
// has assigned offsets to the case blocks.
class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorARM> {
  MTableSwitch* mir_;
  // One label per emitted table word, in emission order (which is reverse
  // case order — see emitTableSwitchDispatch).
  Vector<CodeLabel, 8, JitAllocPolicy> codeLabels_;

  void accept(CodeGeneratorARM* codegen) override {
    codegen->visitOutOfLineTableSwitch(this);
  }

 public:
  OutOfLineTableSwitch(TempAllocator& alloc, MTableSwitch* mir)
      : mir_(mir), codeLabels_(alloc) {}

  MTableSwitch* mir() const { return mir_; }

  bool addCodeLabel(CodeLabel label) { return codeLabels_.append(label); }
  CodeLabel codeLabel(unsigned i) { return codeLabels_[i]; }
};
   1149 
   1150 void CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool) {
   1151  MTableSwitch* mir = ool->mir();
   1152 
   1153  size_t numCases = mir->numCases();
   1154  for (size_t i = 0; i < numCases; i++) {
   1155    LBlock* caseblock =
   1156        skipTrivialBlocks(mir->getCase(numCases - 1 - i))->lir();
   1157    Label* caseheader = caseblock->label();
   1158    uint32_t caseoffset = caseheader->offset();
   1159 
   1160    // The entries of the jump table need to be absolute addresses and thus
   1161    // must be patched after codegen is finished.
   1162    CodeLabel cl = ool->codeLabel(i);
   1163    cl.target()->bind(caseoffset);
   1164    masm.addCodeLabel(cl);
   1165  }
   1166 }
   1167 
// Emit the dispatch sequence for a table switch: bounds-check the index and
// branch through a pc-relative table of absolute case addresses.
void CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch* mir,
                                               Register index, Register base) {
  // The code generated by this is utter hax.
  // The end result looks something like:
  // SUBS index, input, #base
  // RSBSPL index, index, #max
  // LDRPL pc, pc, index lsl 2
  // B default

  // If the range of targets in N through M, we first subtract off the lowest
  // case (N), which both shifts the arguments into the range 0 to (M - N)
  // with and sets the MInus flag if the argument was out of range on the low
  // end.

  // Then we a reverse subtract with the size of the jump table, which will
  // reverse the order of range (It is size through 0, rather than 0 through
  // size). The main purpose of this is that we set the same flag as the lower
  // bound check for the upper bound check. Lastly, we do this conditionally
  // on the previous check succeeding.

  // Then we conditionally load the pc offset by the (reversed) index (times
  // the address size) into the pc, which branches to the correct case. NOTE:
  // when we go to read the pc, the value that we get back is the pc of the
  // current instruction *PLUS 8*. This means that ldr foo, [pc, +0] reads
  // $pc+8. In other words, there is an empty word after the branch into the
  // switch table before the table actually starts. Since the only other
  // unhandled case is the default case (both out of range high and out of
  // range low) I then insert a branch to default case into the extra slot,
  // which ensures we don't attempt to execute the address table.
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  ScratchRegisterScope scratch(masm);

  int32_t cases = mir->numCases();
  // Lower value with low value.
  masm.ma_sub(index, Imm32(mir->low()), index, scratch, SetCC);
  masm.ma_rsb(index, Imm32(cases - 1), index, scratch, SetCC,
              Assembler::NotSigned);
  // Inhibit pools within the following sequence because we are indexing into
  // a pc relative table. The region will have one instruction for ma_ldr, one
  // for ma_b, and each table case takes one word.
  AutoForbidPoolsAndNops afp(&masm, 1 + 1 + cases);
  masm.ma_ldr(DTRAddr(pc, DtrRegImmShift(index, LSL, 2)), pc, Offset,
              Assembler::NotSigned);
  masm.ma_b(defaultcase);

  // To fill in the CodeLabels for the case entries, we need to first generate
  // the case entries (we don't yet know their offsets in the instruction
  // stream).
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(alloc(), mir);
  for (int32_t i = 0; i < cases; i++) {
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    masm.propagateOOM(ool->addCodeLabel(cl));
  }
  addOutOfLineCode(ool, mir);
}
   1225 
   1226 void CodeGenerator::visitMathD(LMathD* math) {
   1227  FloatRegister src1 = ToFloatRegister(math->lhs());
   1228  FloatRegister src2 = ToFloatRegister(math->rhs());
   1229  FloatRegister output = ToFloatRegister(math->output());
   1230 
   1231  switch (math->jsop()) {
   1232    case JSOp::Add:
   1233      masm.ma_vadd(src1, src2, output);
   1234      break;
   1235    case JSOp::Sub:
   1236      masm.ma_vsub(src1, src2, output);
   1237      break;
   1238    case JSOp::Mul:
   1239      masm.ma_vmul(src1, src2, output);
   1240      break;
   1241    case JSOp::Div:
   1242      masm.ma_vdiv(src1, src2, output);
   1243      break;
   1244    default:
   1245      MOZ_CRASH("unexpected opcode");
   1246  }
   1247 }
   1248 
   1249 void CodeGenerator::visitMathF(LMathF* math) {
   1250  FloatRegister src1 = ToFloatRegister(math->lhs());
   1251  FloatRegister src2 = ToFloatRegister(math->rhs());
   1252  FloatRegister output = ToFloatRegister(math->output());
   1253 
   1254  switch (math->jsop()) {
   1255    case JSOp::Add:
   1256      masm.ma_vadd_f32(src1, src2, output);
   1257      break;
   1258    case JSOp::Sub:
   1259      masm.ma_vsub_f32(src1, src2, output);
   1260      break;
   1261    case JSOp::Mul:
   1262      masm.ma_vmul_f32(src1, src2, output);
   1263      break;
   1264    case JSOp::Div:
   1265      masm.ma_vdiv_f32(src1, src2, output);
   1266      break;
   1267    default:
   1268      MOZ_CRASH("unexpected opcode");
   1269  }
   1270 }
   1271 
   1272 void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
   1273  emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
   1274                     ins->mir());
   1275 }
   1276 
   1277 void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
   1278    LWasmBuiltinTruncateDToInt32* ins) {
   1279  emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
   1280                     ins->mir());
   1281 }
   1282 
   1283 void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
   1284  emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
   1285                      ins->mir());
   1286 }
   1287 
   1288 void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
   1289    LWasmBuiltinTruncateFToInt32* ins) {
   1290  emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
   1291                      ins->mir());
   1292 }
   1293 
// Box a non-double payload into a tagged Value (nunbox32 layout).
void CodeGenerator::visitBox(LBox* box) {
  const LDefinition* type = box->getDef(TYPE_INDEX);

  MOZ_ASSERT(!box->payload()->isConstant());

  // On arm, the input operand and the output payload have the same virtual
  // register. All that needs to be written is the type tag for the type
  // definition.
  masm.ma_mov(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
}
   1304 
   1305 void CodeGenerator::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
   1306  const AnyRegister in = ToAnyRegister(box->input());
   1307  const ValueOperand out = ToOutValue(box);
   1308 
   1309  masm.moveValue(TypedOrValueRegister(box->type(), in), out);
   1310 }
   1311 
// Unbox a Value into a payload register, optionally guarding on the tag.
void CodeGenerator::visitUnbox(LUnbox* unbox) {
  // Note that for unbox, the type and payload indexes are switched on the
  // inputs.
  MUnbox* mir = unbox->mir();
  Register type = ToRegister(unbox->type());
  Register payload = ToRegister(unbox->payload());
  Register output = ToRegister(unbox->output());

  // Held in a Maybe so it can be released before assumeUnreachable below —
  // presumably that helper needs the scratch register itself; confirm.
  mozilla::Maybe<ScratchRegisterScope> scratch;
  scratch.emplace(masm);

  JSValueTag tag = MIRTypeToTag(mir->type());
  if (mir->fallible()) {
    // Guard the tag; mismatches bail out to the snapshot.
    masm.ma_cmp(type, Imm32(tag), *scratch);
    bailoutIf(Assembler::NotEqual, unbox->snapshot());
  } else {
#ifdef DEBUG
    // Infallible unboxes still verify the tag in debug builds.
    Label ok;
    masm.ma_cmp(type, Imm32(tag), *scratch);
    masm.ma_b(&ok, Assembler::Equal);
    scratch.reset();
    masm.assumeUnreachable("Infallible unbox type mismatch");
    masm.bind(&ok);
#endif
  }

  // Note: If spectreValueMasking is disabled, then this instruction will
  // default to a no-op as long as the lowering allocate the same register for
  // the output and the payload.
  masm.unboxNonDouble(ValueOperand(type, payload), output,
                      ValueTypeFromMIRType(mir->type()));
}
   1344 
   1345 void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
   1346  const LAllocation* opd = test->input();
   1347  masm.ma_vcmpz(ToFloatRegister(opd));
   1348  masm.as_vmrs(pc);
   1349 
   1350  MBasicBlock* ifTrue = test->ifTrue();
   1351  MBasicBlock* ifFalse = test->ifFalse();
   1352  // If the compare set the 0 bit, then the result is definitely false.
   1353  jumpToBlock(ifFalse, Assembler::Zero);
   1354  // It is also false if one of the operands is NAN, which is shown as
   1355  // Overflow.
   1356  jumpToBlock(ifFalse, Assembler::Overflow);
   1357  jumpToBlock(ifTrue);
   1358 }
   1359 
   1360 void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
   1361  const LAllocation* opd = test->input();
   1362  masm.ma_vcmpz_f32(ToFloatRegister(opd));
   1363  masm.as_vmrs(pc);
   1364 
   1365  MBasicBlock* ifTrue = test->ifTrue();
   1366  MBasicBlock* ifFalse = test->ifFalse();
   1367  // If the compare set the 0 bit, then the result is definitely false.
   1368  jumpToBlock(ifFalse, Assembler::Zero);
   1369  // It is also false if one of the operands is NAN, which is shown as
   1370  // Overflow.
   1371  jumpToBlock(ifFalse, Assembler::Overflow);
   1372  jumpToBlock(ifTrue);
   1373 }
   1374 
   1375 void CodeGenerator::visitCompareD(LCompareD* comp) {
   1376  FloatRegister lhs = ToFloatRegister(comp->left());
   1377  FloatRegister rhs = ToFloatRegister(comp->right());
   1378 
   1379  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
   1380  masm.compareDouble(lhs, rhs);
   1381  masm.emitSet(Assembler::ConditionFromDoubleCondition(cond),
   1382               ToRegister(comp->output()));
   1383 }
   1384 
   1385 void CodeGenerator::visitCompareF(LCompareF* comp) {
   1386  FloatRegister lhs = ToFloatRegister(comp->left());
   1387  FloatRegister rhs = ToFloatRegister(comp->right());
   1388 
   1389  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
   1390  masm.compareFloat(lhs, rhs);
   1391  masm.emitSet(Assembler::ConditionFromDoubleCondition(cond),
   1392               ToRegister(comp->output()));
   1393 }
   1394 
   1395 void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
   1396  FloatRegister lhs = ToFloatRegister(comp->left());
   1397  FloatRegister rhs = ToFloatRegister(comp->right());
   1398 
   1399  Assembler::DoubleCondition cond =
   1400      JSOpToDoubleCondition(comp->cmpMir()->jsop());
   1401  masm.compareDouble(lhs, rhs);
   1402  emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(),
   1403             comp->ifFalse());
   1404 }
   1405 
   1406 void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
   1407  FloatRegister lhs = ToFloatRegister(comp->left());
   1408  FloatRegister rhs = ToFloatRegister(comp->right());
   1409 
   1410  Assembler::DoubleCondition cond =
   1411      JSOpToDoubleCondition(comp->cmpMir()->jsop());
   1412  masm.compareFloat(lhs, rhs);
   1413  emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(),
   1414             comp->ifFalse());
   1415 }
   1416 
   1417 void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
   1418  masm.convertUInt32ToDouble(ToRegister(lir->input()),
   1419                             ToFloatRegister(lir->output()));
   1420 }
   1421 
   1422 void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
   1423  masm.convertUInt32ToFloat32(ToRegister(lir->input()),
   1424                              ToFloatRegister(lir->output()));
   1425 }
   1426 
   1427 void CodeGenerator::visitNotD(LNotD* ins) {
   1428  // Since this operation is not, we want to set a bit if the double is
   1429  // falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
   1430  // 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
   1431  FloatRegister opd = ToFloatRegister(ins->input());
   1432  Register dest = ToRegister(ins->output());
   1433 
   1434  // Do the compare.
   1435  masm.ma_vcmpz(opd);
   1436  // TODO There are three variations here to compare performance-wise.
   1437  bool nocond = true;
   1438  if (nocond) {
   1439    // Load the value into the dest register.
   1440    masm.as_vmrs(dest);
   1441    masm.ma_lsr(Imm32(28), dest, dest);
   1442    // 28 + 2 = 30
   1443    masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
   1444    masm.as_and(dest, dest, Imm8(1));
   1445  } else {
   1446    masm.as_vmrs(pc);
   1447    masm.ma_mov(Imm32(0), dest);
   1448    masm.ma_mov(Imm32(1), dest, Assembler::Equal);
   1449    masm.ma_mov(Imm32(1), dest, Assembler::Overflow);
   1450  }
   1451 }
   1452 
   1453 void CodeGenerator::visitNotF(LNotF* ins) {
   1454  // Since this operation is not, we want to set a bit if the double is
   1455  // falsey, which means 0.0, -0.0 or NaN. When comparing with 0, an input of
   1456  // 0 will set the Z bit (30) and NaN will set the V bit (28) of the APSR.
   1457  FloatRegister opd = ToFloatRegister(ins->input());
   1458  Register dest = ToRegister(ins->output());
   1459 
   1460  // Do the compare.
   1461  masm.ma_vcmpz_f32(opd);
   1462  // TODO There are three variations here to compare performance-wise.
   1463  bool nocond = true;
   1464  if (nocond) {
   1465    // Load the value into the dest register.
   1466    masm.as_vmrs(dest);
   1467    masm.ma_lsr(Imm32(28), dest, dest);
   1468    // 28 + 2 = 30
   1469    masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
   1470    masm.as_and(dest, dest, Imm8(1));
   1471  } else {
   1472    masm.as_vmrs(pc);
   1473    masm.ma_mov(Imm32(0), dest);
   1474    masm.ma_mov(Imm32(1), dest, Assembler::Equal);
   1475    masm.ma_mov(Imm32(1), dest, Assembler::Overflow);
   1476  }
   1477 }
   1478 
// Emit the epilogue jumped to when this IonScript is invalidated: it pushes
// the return address and (patched-in) IonScript pointer, then tail-calls
// the invalidation trampoline.
void CodeGeneratorARM::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint patching
  // to occur. Otherwise, we could overwrite the invalidation epilogue.
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }

  masm.bind(&invalidate_);

  // Push the return address of the point that we bailed out at onto the stack.
  masm.Push(lr);

  // Push the Ion script onto the stack (when we determine what that pointer
  // is).
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
  masm.jump(thunk);
}
   1499 
   1500 void CodeGenerator::visitCompareExchangeTypedArrayElement(
   1501    LCompareExchangeTypedArrayElement* lir) {
   1502  Register elements = ToRegister(lir->elements());
   1503  AnyRegister output = ToAnyRegister(lir->output());
   1504  Register temp = ToTempRegisterOrInvalid(lir->temp0());
   1505 
   1506  Register oldval = ToRegister(lir->oldval());
   1507  Register newval = ToRegister(lir->newval());
   1508 
   1509  Scalar::Type arrayType = lir->mir()->arrayType();
   1510 
   1511  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1512 
   1513  dest.match([&](const auto& dest) {
   1514    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
   1515                           newval, temp, output);
   1516  });
   1517 }
   1518 
   1519 void CodeGenerator::visitAtomicExchangeTypedArrayElement(
   1520    LAtomicExchangeTypedArrayElement* lir) {
   1521  Register elements = ToRegister(lir->elements());
   1522  AnyRegister output = ToAnyRegister(lir->output());
   1523  Register temp = ToTempRegisterOrInvalid(lir->temp0());
   1524 
   1525  Register value = ToRegister(lir->value());
   1526 
   1527  Scalar::Type arrayType = lir->mir()->arrayType();
   1528 
   1529  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1530 
   1531  dest.match([&](const auto& dest) {
   1532    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp,
   1533                          output);
   1534  });
   1535 }
   1536 
   1537 void CodeGenerator::visitAtomicTypedArrayElementBinop(
   1538    LAtomicTypedArrayElementBinop* lir) {
   1539  MOZ_ASSERT(!lir->mir()->isForEffect());
   1540 
   1541  AnyRegister output = ToAnyRegister(lir->output());
   1542  Register elements = ToRegister(lir->elements());
   1543  Register flagTemp = ToRegister(lir->temp0());
   1544  Register outTemp = ToTempRegisterOrInvalid(lir->temp1());
   1545  Register value = ToRegister(lir->value());
   1546 
   1547  Scalar::Type arrayType = lir->mir()->arrayType();
   1548 
   1549  auto mem = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1550 
   1551  mem.match([&](const auto& mem) {
   1552    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
   1553                         lir->mir()->operation(), value, mem, flagTemp, outTemp,
   1554                         output);
   1555  });
   1556 }
   1557 
   1558 void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
   1559    LAtomicTypedArrayElementBinopForEffect* lir) {
   1560  MOZ_ASSERT(lir->mir()->isForEffect());
   1561 
   1562  Register elements = ToRegister(lir->elements());
   1563  Register flagTemp = ToRegister(lir->temp0());
   1564  Register value = ToRegister(lir->value());
   1565  Scalar::Type arrayType = lir->mir()->arrayType();
   1566 
   1567  auto mem = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1568 
   1569  mem.match([&](const auto& mem) {
   1570    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
   1571                          lir->mir()->operation(), value, mem, flagTemp);
   1572  });
   1573 }
   1574 
   1575 void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
   1576  Register elements = ToRegister(lir->elements());
   1577  Register64 out = ToOutRegister64(lir);
   1578 
   1579  Scalar::Type storageType = lir->mir()->storageType();
   1580 
   1581  auto source = ToAddressOrBaseIndex(elements, lir->index(), storageType);
   1582 
   1583  source.match([&](const auto& source) {
   1584    masm.atomicLoad64(Synchronization::Load(), source, out);
   1585  });
   1586 }
   1587 
   1588 void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
   1589  Register elements = ToRegister(lir->elements());
   1590  Register64 value = ToRegister64(lir->value());
   1591  Register64 temp = ToRegister64(lir->temp0());
   1592 
   1593  Scalar::Type writeType = lir->mir()->writeType();
   1594 
   1595  auto dest = ToAddressOrBaseIndex(elements, lir->index(), writeType);
   1596 
   1597  dest.match([&](const auto& dest) {
   1598    masm.atomicStore64(Synchronization::Store(), dest, value, temp);
   1599  });
   1600 }
   1601 
   1602 void CodeGenerator::visitCompareExchangeTypedArrayElement64(
   1603    LCompareExchangeTypedArrayElement64* lir) {
   1604  Register elements = ToRegister(lir->elements());
   1605  Register64 oldval = ToRegister64(lir->oldval());
   1606  Register64 newval = ToRegister64(lir->newval());
   1607  Register64 out = ToOutRegister64(lir);
   1608 
   1609  Scalar::Type arrayType = lir->mir()->arrayType();
   1610 
   1611  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1612 
   1613  dest.match([&](const auto& dest) {
   1614    masm.compareExchange64(Synchronization::Full(), dest, oldval, newval, out);
   1615  });
   1616 }
   1617 
   1618 void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
   1619    LAtomicExchangeTypedArrayElement64* lir) {
   1620  Register elements = ToRegister(lir->elements());
   1621  Register64 value = ToRegister64(lir->value());
   1622  Register64 out = ToOutRegister64(lir);
   1623 
   1624  Scalar::Type arrayType = lir->mir()->arrayType();
   1625 
   1626  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1627 
   1628  dest.match([&](const auto& dest) {
   1629    masm.atomicExchange64(Synchronization::Full(), dest, value, out);
   1630  });
   1631 }
   1632 
   1633 void CodeGenerator::visitAtomicTypedArrayElementBinop64(
   1634    LAtomicTypedArrayElementBinop64* lir) {
   1635  MOZ_ASSERT(!lir->mir()->isForEffect());
   1636 
   1637  Register elements = ToRegister(lir->elements());
   1638  Register64 value = ToRegister64(lir->value());
   1639  Register64 temp = ToRegister64(lir->temp0());
   1640  Register64 out = ToOutRegister64(lir);
   1641 
   1642  Scalar::Type arrayType = lir->mir()->arrayType();
   1643  AtomicOp atomicOp = lir->mir()->operation();
   1644 
   1645  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1646 
   1647  dest.match([&](const auto& dest) {
   1648    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, value, dest, temp,
   1649                         out);
   1650  });
   1651 }
   1652 
   1653 void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
   1654    LAtomicTypedArrayElementBinopForEffect64* lir) {
   1655  MOZ_ASSERT(lir->mir()->isForEffect());
   1656 
   1657  Register elements = ToRegister(lir->elements());
   1658  Register64 value = ToRegister64(lir->value());
   1659  Register64 temp = ToRegister64(lir->temp0());
   1660 
   1661  Scalar::Type arrayType = lir->mir()->arrayType();
   1662  AtomicOp atomicOp = lir->mir()->operation();
   1663 
   1664  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1665 
   1666  dest.match([&](const auto& dest) {
   1667    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, value, dest, temp);
   1668  });
   1669 }
   1670 
// Lower a wasm `select`. Register allocation reuses the true-expression
// register as the output, so only the "false" case needs a move, which is
// emitted as a predicated instruction on the Zero condition.
void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
  MIRType mirType = ins->mir()->type();

  // Set the flags from the selector; Zero means "pick the false value".
  Register cond = ToRegister(ins->condExpr());
  masm.as_cmp(cond, Imm8(0));

  if (mirType == MIRType::Int32 || mirType == MIRType::WasmAnyRef) {
    Register falseExpr = ToRegister(ins->falseExpr());
    Register out = ToRegister(ins->output());
    MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
               "true expr input is reused for output");
    // Conditional move, executed only when the selector was 0.
    masm.ma_mov(falseExpr, out, LeaveCC, Assembler::Zero);
    return;
  }

  // Floating-point cases follow the same reuse-the-true-input scheme.
  FloatRegister out = ToFloatRegister(ins->output());
  MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
             "true expr input is reused for output");

  FloatRegister falseExpr = ToFloatRegister(ins->falseExpr());

  if (mirType == MIRType::Double) {
    masm.moveDouble(falseExpr, out, Assembler::Zero);
  } else if (mirType == MIRType::Float32) {
    masm.moveFloat32(falseExpr, out, Assembler::Zero);
  } else {
    MOZ_CRASH("unhandled type in visitWasmSelect!");
  }
}
   1700 
// We expect to handle only the case where compare is {U,}Int32 and select is
// {U,}Int32, and the "true" input is reused for the output.
void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
  bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
                    ins->compareType() == MCompare::Compare_UInt32;
  bool selIs32bit = ins->mir()->type() == MIRType::Int32;

  MOZ_RELEASE_ASSERT(
      cmpIs32bit && selIs32bit,
      "CodeGenerator::visitWasmCompareAndSelect: unexpected types");

  Register trueExprAndDest = ToRegister(ins->output());
  MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
             "true expr input is reused for output");

  // The output already holds the "true" value, so invert the comparison and
  // conditionally overwrite the output with the "false" value when the
  // original condition does NOT hold.
  Assembler::Condition cond = Assembler::InvertCondition(
      JSOpToCondition(ins->compareType(), ins->jsop()));
  const LAllocation* rhs = ins->rightExpr();
  const LAllocation* falseExpr = ins->ifFalseExpr();
  Register lhs = ToRegister(ins->leftExpr());

  masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
                   trueExprAndDest);
}
   1725 
// Load from the asm.js heap. Per asm.js semantics an out-of-bounds load does
// not trap: it yields NaN for floating-point accesses and 0 for integer
// accesses. This is implemented by predicating the load on ptr < limit and
// preloading the default value on the opposite condition.
void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  const MAsmJSLoadHeap* mir = ins->mir();

  const LAllocation* ptr = ins->ptr();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  Scalar::Type accessType = mir->access().type();
  bool isSigned = Scalar::isSignedIntType(accessType);
  int size = Scalar::byteSize(accessType) * 8;
  bool isFloat = Scalar::isFloatingType(accessType);

  if (ptr->isConstant()) {
    // Constant pointers were proven in bounds, so no check is emitted.
    MOZ_ASSERT(!mir->needsBoundsCheck());
    int32_t ptrImm = ptr->toConstant()->toInt32();
    MOZ_ASSERT(ptrImm >= 0);
    if (isFloat) {
      ScratchRegisterScope scratch(masm);
      VFPRegister vd(ToFloatRegister(ins->output()));
      if (size == 32) {
        masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), scratch,
                     Assembler::Always);
      } else {
        masm.ma_vldr(Address(HeapReg, ptrImm), vd, scratch, Assembler::Always);
      }
    } else {
      ScratchRegisterScope scratch(masm);
      masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
                            ToRegister(ins->output()), scratch, Offset,
                            Assembler::Always);
    }
  } else {
    Register ptrReg = ToRegister(ptr);
    if (isFloat) {
      FloatRegister output = ToFloatRegister(ins->output());
      if (size == 32) {
        output = output.singleOverlay();
      }

      Assembler::Condition cond = Assembler::Always;
      if (mir->needsBoundsCheck()) {
        // Compare against the heap limit; when out of bounds (AboveOrEqual)
        // materialize NaN in the output and skip the load below.
        Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
        masm.as_cmp(ptrReg, O2Reg(boundsCheckLimitReg));
        if (size == 32) {
          masm.ma_vimm_f32(GenericNaN(), output, Assembler::AboveOrEqual);
        } else {
          masm.ma_vimm(GenericNaN(), output, Assembler::AboveOrEqual);
        }
        cond = Assembler::Below;
      }

      ScratchRegisterScope scratch(masm);
      masm.ma_vldr(output, HeapReg, ptrReg, scratch, 0, cond);
    } else {
      Register output = ToRegister(ins->output());

      Assembler::Condition cond = Assembler::Always;
      if (mir->needsBoundsCheck()) {
        // Out-of-bounds integer loads produce 0; the load itself is then
        // predicated on the in-bounds condition.
        Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
        masm.as_cmp(ptrReg, O2Reg(boundsCheckLimitReg));
        masm.ma_mov(Imm32(0), output, Assembler::AboveOrEqual);
        cond = Assembler::Below;
      }

      ScratchRegisterScope scratch(masm);
      masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output,
                            scratch, Offset, cond);
    }
  }
}
   1795 
// Shared lowering for wasm loads, used by both LWasmLoad and LWasmLoadI64.
template <typename T>
void CodeGeneratorARM::emitWasmLoad(T* lir) {
  const MWasmLoad* mir = lir->mir();
  MIRType resultType = mir->type();
  Register ptr;
  Register memoryBase = ToRegister(lir->memoryBase());

  // `ptr` is passed both as the pointer and as the pointer scratch below, so
  // it may be written to. A dedicated temp is register-allocated when the
  // access has a non-zero offset or is Int64; otherwise the pointer register
  // is used directly and temp0 must be bogus.
  if (mir->access().offset32() || mir->access().type() == Scalar::Int64) {
    ptr = ToRegister(lir->temp0());
  } else {
    MOZ_ASSERT(lir->temp0()->isBogusTemp());
    ptr = ToRegister(lir->ptr());
  }

  if (resultType == MIRType::Int64) {
    masm.wasmLoadI64(mir->access(), memoryBase, ptr, ptr, ToOutRegister64(lir));
  } else {
    masm.wasmLoad(mir->access(), memoryBase, ptr, ptr,
                  ToAnyRegister(lir->output()));
  }
}
   1817 
   1818 void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
   1819 
   1820 void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) { emitWasmLoad(lir); }
   1821 
// Add a constant offset to a 32-bit wasm pointer. Unsigned overflow of the
// add means the effective address is out of bounds, so it traps.
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
  MWasmAddOffset* mir = lir->mir();
  Register base = ToRegister(lir->base());
  Register out = ToRegister(lir->output());

  // SetCC so that the carry flag records unsigned overflow of the addition.
  ScratchRegisterScope scratch(masm);
  masm.ma_add(base, Imm32(mir->offset()), out, scratch, SetCC);
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  });
  addOutOfLineCode(ool, mir);
  // Branch to the out-of-line trap when the add carried out.
  masm.ma_b(ool->entry(), Assembler::CarrySet);
}
   1835 
// 64-bit variant of visitWasmAddOffset: add the constant offset with an
// add/adc pair and trap if the carry out of the high word indicates the
// effective address overflowed.
void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
  MWasmAddOffset* mir = lir->mir();
  Register64 base = ToRegister64(lir->base());
  Register64 out = ToOutRegister64(lir);
  // The low-word add must not clobber the high word before it is read (and
  // vice versa), hence this aliasing restriction.
  MOZ_ASSERT(base.low != out.high && base.high != out.low);

  ScratchRegisterScope scratch(masm);
  masm.ma_add(base.low, Imm32(mir->offset()), out.low, scratch, SetCC);
  // adc folds in the carry from the low-word add; SetCC keeps the final
  // carry for the overflow check below.
  masm.ma_adc(base.high, Imm32(mir->offset() >> 32), out.high, scratch, SetCC);
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  });
  addOutOfLineCode(ool, mir);
  masm.ma_b(ool->entry(), Assembler::CarrySet);
}
   1851 
// Shared lowering for wasm stores, used by both LWasmStore and LWasmStoreI64.
template <typename T>
void CodeGeneratorARM::emitWasmStore(T* lir) {
  const MWasmStore* mir = lir->mir();
  Register memoryBase = ToRegister(lir->memoryBase());

  if constexpr (std::is_same_v<T, LWasmStoreI64>) {
    // 64-bit stores always use the register-allocated temp as the pointer;
    // it is passed both as pointer and pointer scratch, so it may be written.
    Register64 value = ToRegister64(lir->value());
    Register ptr = ToRegister(lir->temp0());
    masm.wasmStoreI64(mir->access(), value, memoryBase, ptr, ptr);
  } else {
    // Maybe add the offset: when the access has a non-zero offset the temp
    // holds the (possibly clobbered) pointer; otherwise use the pointer
    // register directly and temp0 must be bogus.
    Register ptr;
    if (mir->access().offset32()) {
      ptr = ToRegister(lir->temp0());
    } else {
      MOZ_ASSERT(lir->temp0()->isBogusTemp());
      ptr = ToRegister(lir->ptr());
    }

    masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), memoryBase, ptr,
                   ptr);
  }
}
   1875 
   1876 void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
   1877 
   1878 void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
   1879  emitWasmStore(lir);
   1880 }
   1881 
   1882 void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
   1883  const MAsmJSStoreHeap* mir = ins->mir();
   1884 
   1885  const LAllocation* ptr = ins->ptr();
   1886  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
   1887 
   1888  Scalar::Type accessType = mir->access().type();
   1889  bool isSigned = accessType == Scalar::Int32 || accessType == Scalar::Uint32;
   1890  int size = Scalar::byteSize(accessType) * 8;
   1891  bool isFloat = Scalar::isFloatingType(accessType);
   1892 
   1893  if (ptr->isConstant()) {
   1894    MOZ_ASSERT(!mir->needsBoundsCheck());
   1895    int32_t ptrImm = ptr->toConstant()->toInt32();
   1896    MOZ_ASSERT(ptrImm >= 0);
   1897    if (isFloat) {
   1898      VFPRegister vd(ToFloatRegister(ins->value()));
   1899      Address addr(HeapReg, ptrImm);
   1900      if (size == 32) {
   1901        masm.storeFloat32(vd, addr);
   1902      } else {
   1903        masm.storeDouble(vd, addr);
   1904      }
   1905    } else {
   1906      ScratchRegisterScope scratch(masm);
   1907      masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
   1908                            ToRegister(ins->value()), scratch, Offset,
   1909                            Assembler::Always);
   1910    }
   1911  } else {
   1912    Register ptrReg = ToRegister(ptr);
   1913 
   1914    Assembler::Condition cond = Assembler::Always;
   1915    if (mir->needsBoundsCheck()) {
   1916      Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
   1917      masm.as_cmp(ptrReg, O2Reg(boundsCheckLimitReg));
   1918      cond = Assembler::Below;
   1919    }
   1920 
   1921    if (isFloat) {
   1922      ScratchRegisterScope scratch(masm);
   1923      FloatRegister value = ToFloatRegister(ins->value());
   1924      if (size == 32) {
   1925        value = value.singleOverlay();
   1926      }
   1927 
   1928      masm.ma_vstr(value, HeapReg, ptrReg, scratch, 0, Assembler::Below);
   1929    } else {
   1930      ScratchRegisterScope scratch(masm);
   1931      Register value = ToRegister(ins->value());
   1932      masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value,
   1933                            scratch, Offset, cond);
   1934    }
   1935  }
   1936 }
   1937 
   1938 void CodeGenerator::visitWasmCompareExchangeHeap(
   1939    LWasmCompareExchangeHeap* ins) {
   1940  MWasmCompareExchangeHeap* mir = ins->mir();
   1941 
   1942  const LAllocation* ptr = ins->ptr();
   1943  Register ptrReg = ToRegister(ptr);
   1944  Register memoryBase = ToRegister(ins->memoryBase());
   1945  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());
   1946 
   1947  Register oldval = ToRegister(ins->oldValue());
   1948  Register newval = ToRegister(ins->newValue());
   1949  Register out = ToRegister(ins->output());
   1950 
   1951  masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, out);
   1952 }
   1953 
   1954 void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
   1955  MWasmAtomicExchangeHeap* mir = ins->mir();
   1956 
   1957  Register ptrReg = ToRegister(ins->ptr());
   1958  Register value = ToRegister(ins->value());
   1959  Register memoryBase = ToRegister(ins->memoryBase());
   1960  Register output = ToRegister(ins->output());
   1961  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());
   1962 
   1963  masm.wasmAtomicExchange(mir->access(), srcAddr, value, output);
   1964 }
   1965 
   1966 void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
   1967  MWasmAtomicBinopHeap* mir = ins->mir();
   1968  MOZ_ASSERT(mir->hasUses());
   1969 
   1970  Register ptrReg = ToRegister(ins->ptr());
   1971  Register memoryBase = ToRegister(ins->memoryBase());
   1972  Register flagTemp = ToRegister(ins->temp0());
   1973  Register output = ToRegister(ins->output());
   1974  const LAllocation* value = ins->value();
   1975  AtomicOp op = mir->operation();
   1976 
   1977  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());
   1978  masm.wasmAtomicFetchOp(mir->access(), op, ToRegister(value), srcAddr,
   1979                         flagTemp, output);
   1980 }
   1981 
   1982 void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
   1983    LWasmAtomicBinopHeapForEffect* ins) {
   1984  MWasmAtomicBinopHeap* mir = ins->mir();
   1985  MOZ_ASSERT(!mir->hasUses());
   1986 
   1987  Register ptrReg = ToRegister(ins->ptr());
   1988  Register memoryBase = ToRegister(ins->memoryBase());
   1989  Register flagTemp = ToRegister(ins->temp0());
   1990  const LAllocation* value = ins->value();
   1991  AtomicOp op = mir->operation();
   1992 
   1993  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());
   1994  masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), srcAddr,
   1995                          flagTemp);
   1996 }
   1997 
// Spill an outgoing wasm call argument into its stack slot at sp+spOffset.
void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
  const MWasmStackArg* mir = ins->mir();
  Address dst(StackPointer, mir->spOffset());
  ScratchRegisterScope scratch(masm);
  SecondScratchRegisterScope scratch2(masm);

  if (ins->arg()->isConstant()) {
    // The constant is materialized in the first scratch register, so the
    // store needs the second scratch for address computation.
    masm.ma_mov(Imm32(ToInt32(ins->arg())), scratch);
    masm.ma_str(scratch, dst, scratch2);
  } else {
    if (ins->arg()->isGeneralReg()) {
      masm.ma_str(ToRegister(ins->arg()), dst, scratch);
    } else {
      masm.ma_vstr(ToFloatRegister(ins->arg()), dst, scratch);
    }
  }
}
   2015 
// Unsigned 32-bit division using the hardware udiv instruction.
void CodeGenerator::visitUDiv(LUDiv* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());

  Label done;
  generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());

  masm.ma_udiv(lhs, rhs, output);

  // Check for large unsigned result - represent as double.
  // (A result with bit 31 set is not representable as an int32; comparing
  // against 0 and bailing on LessThan tests exactly that bit.)
  if (!ins->mir()->isTruncated()) {
    MOZ_ASSERT(ins->mir()->fallible());
    masm.as_cmp(output, Imm8(0));
    bailoutIf(Assembler::LessThan, ins->snapshot());
  }

  // Check for non-zero remainder if not truncating to int.
  if (!ins->mir()->canTruncateRemainder()) {
    MOZ_ASSERT(ins->mir()->fallible());
    {
      // Reconstruct rhs * quotient and compare with lhs; inequality means
      // the division was inexact.
      ScratchRegisterScope scratch(masm);
      masm.ma_mul(rhs, output, scratch);
      masm.ma_cmp(scratch, lhs);
    }
    bailoutIf(Assembler::NotEqual, ins->snapshot());
  }

  // `done` is only bound if the zero check emitted a jump to it.
  if (done.used()) {
    masm.bind(&done);
  }
}
   2048 
// Unsigned 32-bit modulus using the hardware udiv instruction (via ma_umod).
void CodeGenerator::visitUMod(LUMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());

  Label done;
  generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());

  {
    ScratchRegisterScope scratch(masm);
    masm.ma_umod(lhs, rhs, output, scratch);
  }

  // Check for large unsigned result - represent as double.
  // (A result with bit 31 set is not representable as an int32.)
  if (!ins->mir()->isTruncated()) {
    MOZ_ASSERT(ins->mir()->fallible());
    masm.as_cmp(output, Imm8(0));
    bailoutIf(Assembler::LessThan, ins->snapshot());
  }

  // `done` is only bound if the zero check emitted a jump to it.
  if (done.used()) {
    masm.bind(&done);
  }
}
   2073 
// Emit the divide-by-zero check shared by the unsigned div/mod lowerings.
// `mir` is an MDiv or MMod, or null (in which case nothing is emitted):
// - truncated with trapOnError (wasm): trap on a zero divisor;
// - truncated without trapOnError (JS |0): x/0 and x%0 are 0, so set the
//   output to 0 and jump to `done`;
// - not truncated: bail out to recompile without the int32 specialization.
template <class T>
void CodeGeneratorARM::generateUDivModZeroCheck(Register rhs, Register output,
                                                Label* done,
                                                LSnapshot* snapshot, T* mir) {
  if (!mir) {
    return;
  }
  if (mir->canBeDivideByZero()) {
    masm.as_cmp(rhs, Imm8(0));
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        Label nonZero;
        masm.ma_b(&nonZero, Assembler::NotEqual);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
        masm.bind(&nonZero);
      } else {
        Label skip;
        masm.ma_b(&skip, Assembler::NotEqual);
        // Infinity|0 == 0
        masm.ma_mov(Imm32(0), output);
        masm.ma_b(done);
        masm.bind(&skip);
      }
    } else {
      // Bailout for divide by zero
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Equal, snapshot);
    }
  }
}
   2104 
// Unsigned 32-bit div/mod for targets without hardware divide: delegate to
// the __aeabi_uidivmod runtime helper, which returns the quotient in r0 and
// the remainder in r1.
void CodeGenerator::visitSoftUDivOrMod(LSoftUDivOrMod* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());

  // The soft-divide calling convention fixes the argument/result registers.
  MOZ_ASSERT(lhs == r0);
  MOZ_ASSERT(rhs == r1);
  MOZ_ASSERT(output == r0);

  Label done;
  // Exactly one of div/mod is non-null; the zero check ignores a null mir.
  MDiv* div = ins->mir()->isDiv() ? ins->mir()->toDiv() : nullptr;
  MMod* mod = !div ? ins->mir()->toMod() : nullptr;

  generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), div);
  generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), mod);

  if (gen->compilingWasm()) {
    // Preserve the instance register across the ABI call.
    masm.Push(InstanceReg);
    int32_t framePushedAfterInstance = masm.framePushed();
    masm.setupWasmABICall(wasm::SymbolicAddress::aeabi_uidivmod);
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    wasm::BytecodeOffset bytecodeOffset =
        (div ? div->trapSiteDesc().bytecodeOffset
             : mod->trapSiteDesc().bytecodeOffset);
    int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
    masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::aeabi_uidivmod,
                     mozilla::Some(instanceOffset));
    masm.Pop(InstanceReg);
  } else {
    using Fn = int64_t (*)(int, int);
    masm.setupAlignedABICall();
    masm.passABIArg(lhs);
    masm.passABIArg(rhs);
    masm.callWithABI<Fn, __aeabi_uidivmod>(
        ABIType::Int64, CheckUnsafeCallWithABI::DontCheckOther);
  }

  // For mod, the result is the remainder, which the helper left in r1.
  if (mod) {
    MOZ_ASSERT(output == r0, "output should not be r1 for mod");
    masm.move32(r1, output);
  }

  // uidivmod returns the quotient in r0, and the remainder in r1.
  // A non-zero remainder means the division was inexact; bail out.
  if (div && !div->canTruncateRemainder()) {
    MOZ_ASSERT(div->fallible());
    masm.as_cmp(r1, Imm8(0));
    bailoutIf(Assembler::NonZero, ins->snapshot());
  }

  // Bailout for big unsigned results
  // (a result with bit 31 set is not representable as an int32).
  if ((div && !div->isTruncated()) || (mod && !mod->isTruncated())) {
    DebugOnly<bool> isFallible =
        (div && div->fallible()) || (mod && mod->fallible());
    MOZ_ASSERT(isFallible);
    masm.as_cmp(output, Imm8(0));
    bailoutIf(Assembler::LessThan, ins->snapshot());
  }

  masm.bind(&done);
}
   2166 
   2167 void CodeGenerator::visitEffectiveAddress3(LEffectiveAddress3* ins) {
   2168  const MEffectiveAddress3* mir = ins->mir();
   2169  Register base = ToRegister(ins->base());
   2170  Register index = ToRegister(ins->index());
   2171  Register output = ToRegister(ins->output());
   2172 
   2173  masm.as_add(output, base, lsl(index, mir->scale()));
   2174  if (mir->displacement() != 0) {
   2175    ScratchRegisterScope scratch(masm);
   2176    masm.ma_add(Imm32(mir->displacement()), output, scratch);
   2177  }
   2178 }
   2179 
   2180 void CodeGenerator::visitEffectiveAddress2(LEffectiveAddress2* ins) {
   2181  const MEffectiveAddress2* mir = ins->mir();
   2182  Register index = ToRegister(ins->index());
   2183  Register output = ToRegister(ins->output());
   2184 
   2185  masm.ma_lsl(Imm32(mir->scale()), index, output);
   2186  ScratchRegisterScope scratch(masm);
   2187  masm.ma_add(Imm32(mir->displacement()), output, scratch);
   2188 }
   2189 
   2190 void CodeGenerator::visitNegI(LNegI* ins) {
   2191  Register input = ToRegister(ins->input());
   2192  masm.ma_neg(input, ToRegister(ins->output()));
   2193 }
   2194 
   2195 void CodeGenerator::visitNegI64(LNegI64* ins) {
   2196  Register64 input = ToRegister64(ins->input());
   2197  MOZ_ASSERT(input == ToOutRegister64(ins));
   2198  masm.neg64(input);
   2199 }
   2200 
   2201 void CodeGenerator::visitNegD(LNegD* ins) {
   2202  FloatRegister input = ToFloatRegister(ins->input());
   2203  masm.ma_vneg(input, ToFloatRegister(ins->output()));
   2204 }
   2205 
   2206 void CodeGenerator::visitNegF(LNegF* ins) {
   2207  FloatRegister input = ToFloatRegister(ins->input());
   2208  masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
   2209 }
   2210 
// Truncate a float/double to int32. Saturating truncations are handled
// entirely inline by the macroassembler; non-saturating ones get an
// out-of-line entry that traps on invalid inputs.
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
  auto input = ToFloatRegister(lir->input());
  auto output = ToRegister(lir->output());

  MWasmTruncateToInt32* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  // Only the non-saturating path needs the OOL trap check.
  OutOfLineWasmTruncateCheck* ool = nullptr;
  Label* oolEntry = nullptr;
  if (!lir->mir()->isSaturating()) {
    ool = new (alloc())
        OutOfLineWasmTruncateCheck(mir, input, Register::Invalid());
    addOutOfLineCode(ool, mir);
    oolEntry = ool->entry();
  }

  masm.wasmTruncateToInt32(input, output, fromType, mir->isUnsigned(),
                           mir->isSaturating(), oolEntry);

  if (!lir->mir()->isSaturating()) {
    masm.bind(ool->rejoin());
  }
}
   2234 
// Truncate a floating point value to int64 via a runtime call (ARM32 has no
// 64-bit integer conversion instructions). Float32 inputs are widened to
// double first because the callees take a double argument.
void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  FloatRegister input = ToFloatRegister(lir->input());
  FloatRegister inputDouble = input;
  Register64 output = ToOutRegister64(lir);

  MWasmBuiltinTruncateToInt64* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  // Only the non-saturating path needs the OOL trap check.
  OutOfLineWasmTruncateCheck* ool = nullptr;
  if (!lir->mir()->isSaturating()) {
    ool = new (alloc())
        OutOfLineWasmTruncateCheck(mir, input, Register64::Invalid());
    addOutOfLineCode(ool, mir);
  }

  ScratchDoubleScope fpscratch(masm);
  if (fromType == MIRType::Float32) {
    inputDouble = fpscratch;
    masm.convertFloat32ToDouble(input, inputDouble);
  }

  // Preserve the original input across the call for the OOL check.
  masm.Push(input);

  // Pick the callee matching the signedness/saturation of the truncation.
  wasm::SymbolicAddress callee;
  if (lir->mir()->isSaturating()) {
    if (lir->mir()->isUnsigned()) {
      callee = wasm::SymbolicAddress::SaturatingTruncateDoubleToUint64;
    } else {
      callee = wasm::SymbolicAddress::SaturatingTruncateDoubleToInt64;
    }
  } else {
    if (lir->mir()->isUnsigned()) {
      callee = wasm::SymbolicAddress::TruncateDoubleToUint64;
    } else {
      callee = wasm::SymbolicAddress::TruncateDoubleToInt64;
    }
  }

  masm.setupWasmABICall(callee);
  masm.passABIArg(inputDouble, ABIType::Float64);

  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(mir->trapSiteDesc().bytecodeOffset, callee,
                   mozilla::Some(instanceOffset));

  masm.Pop(input);
  masm.Pop(InstanceReg);

  // TruncateDoubleTo{UI,I}nt64 returns 0x8000000000000000 to indicate
  // exceptional results, so check for that and produce the appropriate
  // traps. The Saturating form always returns a normal value and never
  // needs traps.
  if (!lir->mir()->isSaturating()) {
    // Compare high word first; the low-word compare is predicated on Equal
    // so that the final Equal condition means "both words matched".
    ScratchRegisterScope scratch(masm);
    masm.ma_cmp(output.high, Imm32(0x80000000), scratch);
    masm.as_cmp(output.low, Imm8(0x00000000), Assembler::Equal);
    masm.ma_b(ool->entry(), Assembler::Equal);

    masm.bind(ool->rejoin());
  }

  MOZ_ASSERT(ReturnReg64 == output);
}
   2303 
// Out-of-line path for a failed (non-saturating) truncation: defer to the
// macroassembler helper, which emits the appropriate trap handling.
void CodeGeneratorARM::visitOutOfLineWasmTruncateCheck(
    OutOfLineWasmTruncateCheck* ool) {
  // On ARM, saturating truncation codegen handles saturating itself rather than
  // relying on out-of-line fixup code.
  if (ool->isSaturating()) {
    return;
  }

  masm.outOfLineWasmTruncateToIntCheck(ool->input(), ool->fromType(),
                                       ool->toType(), ool->isUnsigned(),
                                       ool->rejoin(), ool->trapSiteDesc());
}
   2316 
// Convert a (u)int64 to float32/double via a runtime call; ARM32 has no
// 64-bit integer-to-FP conversion instructions.
void CodeGenerator::visitInt64ToFloatingPointCall(
    LInt64ToFloatingPointCall* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  Register64 input = ToRegister64(lir->input());

  MBuiltinInt64ToFloatingPoint* mir = lir->mir();
  MIRType toType = mir->type();

  // Select the callee by destination type and signedness.
  bool isUnsigned = mir->isUnsigned();
  wasm::SymbolicAddress callee =
      toType == MIRType::Float32
          ? (isUnsigned ? wasm::SymbolicAddress::Uint64ToFloat32
                        : wasm::SymbolicAddress::Int64ToFloat32)
          : (isUnsigned ? wasm::SymbolicAddress::Uint64ToDouble
                        : wasm::SymbolicAddress::Int64ToDouble);
  masm.setupWasmABICall(callee);
  masm.passABIArg(input.high);
  masm.passABIArg(input.low);

  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  ABIType result =
      toType == MIRType::Float32 ? ABIType::Float32 : ABIType::Float64;
  masm.callWithABI(mir->bytecodeOffset(), callee, mozilla::Some(instanceOffset),
                   result);

  // The result arrives in the ABI return FP register, which register
  // allocation has pinned as the output.
  DebugOnly<FloatRegister> output(ToFloatRegister(lir->output()));
  MOZ_ASSERT_IF(toType == MIRType::Double, output.value == ReturnDoubleReg);
  MOZ_ASSERT_IF(toType == MIRType::Float32, output.value == ReturnFloat32Reg);

  masm.Pop(InstanceReg);
}
   2352 
   2353 void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
   2354  LInt64Allocation input = lir->input();
   2355  Register output = ToRegister(lir->output());
   2356 
   2357  if (lir->mir()->bottomHalf()) {
   2358    masm.move32(ToRegister(input.low()), output);
   2359  } else {
   2360    masm.move32(ToRegister(input.high()), output);
   2361  }
   2362 }
   2363 
   2364 void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
   2365  Register64 output = ToOutRegister64(lir);
   2366  MOZ_ASSERT(ToRegister(lir->input()) == output.low);
   2367 
   2368  if (lir->mir()->isUnsigned()) {
   2369    masm.ma_mov(Imm32(0), output.high);
   2370  } else {
   2371    masm.ma_asr(Imm32(31), output.low, output.high);
   2372  }
   2373 }
   2374 
   2375 void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
   2376  Register64 input = ToRegister64(lir->input());
   2377  Register64 output = ToOutRegister64(lir);
   2378  switch (lir->mir()->mode()) {
   2379    case MSignExtendInt64::Byte:
   2380      masm.move8SignExtend(input.low, output.low);
   2381      break;
   2382    case MSignExtendInt64::Half:
   2383      masm.move16SignExtend(input.low, output.low);
   2384      break;
   2385    case MSignExtendInt64::Word:
   2386      masm.move32(input.low, output.low);
   2387      break;
   2388  }
   2389  masm.ma_asr(Imm32(31), output.low, output.high);
   2390 }
   2391 
   2392 void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index*) {
   2393  MOZ_CRASH("64-bit only");
   2394 }
   2395 
   2396 void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
   2397  // Generates no code on this platform because we just return the low part of
   2398  // the input register pair.
   2399  MOZ_ASSERT(ToRegister(lir->input()) == ToRegister(lir->output()));
   2400 }
   2401 
// Signed 64-bit div/mod via a runtime call (ARM32 has no 64-bit divide).
// Divide-by-zero and INT64_MIN / -1 overflow are handled inline before the
// call: the former traps, the latter traps for div and yields 0 for mod.
void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  Register64 lhs = ToRegister64(lir->lhs());
  Register64 rhs = ToRegister64(lir->rhs());
  Register64 output = ToOutRegister64(lir);

  MOZ_ASSERT(output == ReturnReg64);

  Label done;

  // Handle divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    // We can use InstanceReg as temp register because we preserved it
    // before.
    masm.branchTest64(Assembler::NonZero, rhs, rhs, InstanceReg, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->trapSiteDesc());
    masm.bind(&nonZero);
  }

  auto* mir = lir->mir();

  // Handle an integer overflow exception from INT64_MIN / -1.
  if (lir->canBeNegativeOverflow()) {
    Label notmin;
    masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notmin);
    masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
    if (mir->isWasmBuiltinModI64()) {
      // INT64_MIN % -1 is 0, not a trap.
      masm.xor64(output, output);
    } else {
      masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->trapSiteDesc());
    }
    masm.jump(&done);
    masm.bind(&notmin);
  }

  // Both helpers take (lhs, rhs) as high/low word pairs.
  wasm::SymbolicAddress callee = mir->isWasmBuiltinModI64()
                                     ? wasm::SymbolicAddress::ModI64
                                     : wasm::SymbolicAddress::DivI64;
  masm.setupWasmABICall(callee);
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(lir->trapSiteDesc().bytecodeOffset, callee,
                   mozilla::Some(instanceOffset));

  MOZ_ASSERT(ReturnReg64 == output);

  masm.bind(&done);
  masm.Pop(InstanceReg);
}
   2460 
// Lower wasm unsigned i64 division/modulus via a runtime builtin call.
// Unlike the signed variant, no overflow check is needed here.
void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
  MOZ_ASSERT(gen->compilingWasm());
  MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);
  // Preserve the instance register across the ABI call; pushing it also
  // frees it up for use as a temporary below.
  masm.Push(InstanceReg);
  int32_t framePushedAfterInstance = masm.framePushed();

  Register64 lhs = ToRegister64(lir->lhs());
  Register64 rhs = ToRegister64(lir->rhs());

  MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);

  // Prevent divide by zero.
  if (lir->canBeDivideByZero()) {
    Label nonZero;
    // We can use InstanceReg as temp register because we preserved it
    // before.
    masm.branchTest64(Assembler::NonZero, rhs, rhs, InstanceReg, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->trapSiteDesc());
    masm.bind(&nonZero);
  }

  MDefinition* mir = lir->mir();
  wasm::SymbolicAddress callee = mir->isWasmBuiltinModI64()
                                     ? wasm::SymbolicAddress::UModI64
                                     : wasm::SymbolicAddress::UDivI64;
  masm.setupWasmABICall(callee);
  // Each i64 operand is passed as two 32-bit ABI arguments, high word first.
  masm.passABIArg(lhs.high);
  masm.passABIArg(lhs.low);
  masm.passABIArg(rhs.high);
  masm.passABIArg(rhs.low);

  int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
  masm.callWithABI(lir->trapSiteDesc().bytecodeOffset, callee,
                   mozilla::Some(instanceOffset));
  masm.Pop(InstanceReg);
}
   2497 
   2498 void CodeGenerator::visitShiftI64(LShiftI64* lir) {
   2499  LInt64Allocation lhs = lir->lhs();
   2500  const LAllocation* rhs = lir->rhs();
   2501 
   2502  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
   2503 
   2504  if (rhs->isConstant()) {
   2505    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
   2506    switch (lir->bitop()) {
   2507      case JSOp::Lsh:
   2508        if (shift) {
   2509          masm.lshift64(Imm32(shift), ToRegister64(lhs));
   2510        }
   2511        break;
   2512      case JSOp::Rsh:
   2513        if (shift) {
   2514          masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
   2515        }
   2516        break;
   2517      case JSOp::Ursh:
   2518        if (shift) {
   2519          masm.rshift64(Imm32(shift), ToRegister64(lhs));
   2520        }
   2521        break;
   2522      default:
   2523        MOZ_CRASH("Unexpected shift op");
   2524    }
   2525    return;
   2526  }
   2527 
   2528  switch (lir->bitop()) {
   2529    case JSOp::Lsh:
   2530      masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
   2531      break;
   2532    case JSOp::Rsh:
   2533      masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
   2534      break;
   2535    case JSOp::Ursh:
   2536      masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
   2537      break;
   2538    default:
   2539      MOZ_CRASH("Unexpected shift op");
   2540  }
   2541 }
   2542 
   2543 void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
   2544  LInt64Allocation lhs = lir->lhs();
   2545  LInt64Allocation rhs = lir->rhs();
   2546 
   2547  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
   2548 
   2549  switch (lir->bitop()) {
   2550    case JSOp::BitOr:
   2551      if (IsConstant(rhs)) {
   2552        masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
   2553      } else {
   2554        masm.or64(ToRegister64(rhs), ToRegister64(lhs));
   2555      }
   2556      break;
   2557    case JSOp::BitXor:
   2558      if (IsConstant(rhs)) {
   2559        masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
   2560      } else {
   2561        masm.xor64(ToRegister64(rhs), ToRegister64(lhs));
   2562      }
   2563      break;
   2564    case JSOp::BitAnd:
   2565      if (IsConstant(rhs)) {
   2566        masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
   2567      } else {
   2568        masm.and64(ToRegister64(rhs), ToRegister64(lhs));
   2569      }
   2570      break;
   2571    default:
   2572      MOZ_CRASH("unexpected binary opcode");
   2573  }
   2574 }
   2575 
   2576 void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
   2577  const MWasmStackArg* mir = ins->mir();
   2578  Address dst(StackPointer, mir->spOffset());
   2579  if (IsConstant(ins->arg())) {
   2580    masm.store64(Imm64(ToInt64(ins->arg())), dst);
   2581  } else {
   2582    masm.store64(ToRegister64(ins->arg()), dst);
   2583  }
   2584 }
   2585 
// out = cond ? trueExpr : falseExpr, for a 64-bit register pair.
void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
  Register cond = ToRegister(lir->condExpr());
  LInt64Allocation falseExpr = lir->falseExpr();

  // The true value is already in the output pair; it is only overwritten
  // (via condition-code-predicated instructions) when the condition is zero.
  Register64 out = ToOutRegister64(lir);
  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
             "true expr is reused for input");

  // Set the condition codes once; the predicated moves/loads below all key
  // off this compare, so nothing in between may clobber the flags.
  masm.as_cmp(cond, Imm8(0));
  if (falseExpr.low().isGeneralReg()) {
    // False value lives in registers: conditionally move both words.
    masm.ma_mov(ToRegister(falseExpr.low()), out.low, LeaveCC,
                Assembler::Equal);
    masm.ma_mov(ToRegister(falseExpr.high()), out.high, LeaveCC,
                Assembler::Equal);
  } else {
    // False value lives in memory: conditionally load both words.
    ScratchRegisterScope scratch(masm);
    masm.ma_ldr(ToAddress(falseExpr.low()), out.low, scratch, Offset,
                Assembler::Equal);
    masm.ma_ldr(ToAddress(falseExpr.high()), out.high, scratch, Offset,
                Assembler::Equal);
  }
}
   2608 
   2609 void CodeGenerator::visitBitNotI64(LBitNotI64* lir) {
   2610  Register64 input = ToRegister64(lir->input());
   2611  MOZ_ASSERT(input == ToOutRegister64(lir));
   2612  masm.ma_mvn(input.high, input.high);
   2613  masm.ma_mvn(input.low, input.low);
   2614 }
   2615 
   2616 void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir) {
   2617  Register ptr = ToRegister(lir->ptr());
   2618  Register memoryBase = ToRegister(lir->memoryBase());
   2619  Register64 output = ToOutRegister64(lir);
   2620  Register64 tmp(InvalidReg, InvalidReg);
   2621 
   2622  BaseIndex addr(memoryBase, ptr, TimesOne, lir->mir()->access().offset32());
   2623  masm.wasmAtomicLoad64(lir->mir()->access(), addr, tmp, output);
   2624 }
   2625 
   2626 void CodeGenerator::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir) {
   2627  Register ptr = ToRegister(lir->ptr());
   2628  Register memoryBase = ToRegister(lir->memoryBase());
   2629  Register64 value = ToRegister64(lir->value());
   2630  Register64 tmp = ToRegister64(lir->temp0());
   2631 
   2632  BaseIndex addr(memoryBase, ptr, TimesOne, lir->mir()->access().offset32());
   2633  masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, tmp);
   2634 }
   2635 
   2636 void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
   2637  Register ptr = ToRegister(lir->ptr());
   2638  Register64 expected = ToRegister64(lir->expected());
   2639  Register64 replacement = ToRegister64(lir->replacement());
   2640  Register memoryBase = ToRegister(lir->memoryBase());
   2641  Register64 out = ToOutRegister64(lir);
   2642 
   2643  BaseIndex addr(memoryBase, ptr, TimesOne, lir->mir()->access().offset32());
   2644  masm.wasmCompareExchange64(lir->mir()->access(), addr, expected, replacement,
   2645                             out);
   2646 }
   2647 
   2648 void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
   2649  Register ptr = ToRegister(lir->ptr());
   2650  Register64 value = ToRegister64(lir->value());
   2651  Register memoryBase = ToRegister(lir->memoryBase());
   2652  Register64 out = ToOutRegister64(lir);
   2653 
   2654  BaseIndex addr(memoryBase, ptr, TimesOne, lir->access().offset32());
   2655  Register64 tmp = ToRegister64(lir->temp0());
   2656  masm.wasmAtomicFetchOp64(lir->access(), lir->operation(), value, addr, tmp,
   2657                           out);
   2658 }
   2659 
   2660 void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
   2661  Register ptr = ToRegister(lir->ptr());
   2662  Register64 value = ToRegister64(lir->value());
   2663  Register memoryBase = ToRegister(lir->memoryBase());
   2664  Register64 out = ToOutRegister64(lir);
   2665 
   2666  BaseIndex addr(memoryBase, ptr, TimesOne, lir->access().offset32());
   2667  masm.wasmAtomicExchange64(lir->access(), addr, value, out);
   2668 }
   2669 
// SIMD (Simd128) is not supported on this platform: lowering must never
// create these LIR nodes, so every visitor below is an unreachable stub.
void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }

void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128WithConstant(
    LWasmBinarySimd128WithConstant* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmVariableShiftSimd128(
    LWasmVariableShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmConstantShiftSimd128(
    LWasmConstantShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmSignReplicationSimd128(
    LWasmSignReplicationSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
    LWasmReplaceInt64LaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceAndBranchSimd128(
    LWasmReduceAndBranchSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128ToInt64(
    LWasmReduceSimd128ToInt64* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}