tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Lowering-arm.cpp (40934B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/arm/Lowering-arm.h"
      8 
      9 #include "mozilla/MathAlgorithms.h"
     10 
     11 #include "jit/arm/Assembler-arm.h"
     12 #include "jit/Lowering.h"
     13 #include "jit/MIR-wasm.h"
     14 #include "jit/MIR.h"
     15 #include "jit/shared/Lowering-shared-inl.h"
     16 
     17 using namespace js;
     18 using namespace js::jit;
     19 
     20 using mozilla::FloorLog2;
     21 
LBoxAllocation LIRGeneratorARM::useBoxFixed(MDefinition* mir, Register reg1,
                                            Register reg2, bool useAtStart) {
  // Allocate the two halves of a boxed Value into a fixed register pair:
  // |reg1| receives the type-tag half (the definition's own vreg) and
  // |reg2| the payload half (VirtualRegisterOfPayload).
  MOZ_ASSERT(mir->type() == MIRType::Value);
  MOZ_ASSERT(reg1 != reg2);

  ensureDefined(mir);
  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
                        LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
}
     31 
     32 LAllocation LIRGeneratorARM::useByteOpRegister(MDefinition* mir) {
     33  return useRegister(mir);
     34 }
     35 
     36 LAllocation LIRGeneratorARM::useByteOpRegisterAtStart(MDefinition* mir) {
     37  return useRegisterAtStart(mir);
     38 }
     39 
     40 LAllocation LIRGeneratorARM::useByteOpRegisterOrNonDoubleConstant(
     41    MDefinition* mir) {
     42  return useRegisterOrNonDoubleConstant(mir);
     43 }
     44 
     45 LDefinition LIRGeneratorARM::tempByteOpRegister() { return temp(); }
     46 
void LIRGenerator::visitBox(MBox* box) {
  // Lower a box operation. On 32-bit ARM a Value occupies two 32-bit
  // definitions (type tag + payload); boxing either materializes a fresh
  // pair (doubles, constants) or aliases the input as the payload half.
  MDefinition* inner = box->getOperand(0);

  // If the box wrapped a double, it needs a new register.
  if (IsFloatingPointType(inner->type())) {
    defineBox(new (alloc()) LBoxFloatingPoint(
                  useRegisterAtStart(inner), tempCopy(inner, 0), inner->type()),
              box);
    return;
  }

  // Boxes that can be recreated at their uses need no instruction here.
  if (box->canEmitAtUses()) {
    emitAtUses(box);
    return;
  }

  // Constants are rematerialized as a full Value.
  if (inner->isConstant()) {
    defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
    return;
  }

  LBox* lir = new (alloc()) LBox(use(inner), inner->type());

  // Otherwise, we should not define a new register for the payload portion
  // of the output, so bypass defineBox().
  uint32_t vreg = getVirtualRegister();

  // Note that because we're using BogusTemp(), we do not change the type of
  // the definition. We also do not define the first output as "TYPE",
  // because it has no corresponding payload at (vreg + 1). Also note that
  // although we copy the input's original type for the payload half of the
  // definition, this is only for clarity. BogusTemp() definitions are
  // ignored.
  lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
  lir->setDef(1, LDefinition::BogusTemp());
  box->setVirtualRegister(vreg);
  addUnchecked(lir);
}
     85 
void LIRGenerator::visitUnbox(MUnbox* unbox) {
  MDefinition* inner = unbox->getOperand(0);

  // An unbox on arm reads in a type tag (either in memory or a register) and
  // a payload. Unlike most instructions consuming a box, we ask for the type
  // second, so that the result can re-use the first input.
  MOZ_ASSERT(inner->type() == MIRType::Value);

  ensureDefined(inner);

  if (IsFloatingPointType(unbox->type())) {
    // A double is rebuilt into a fresh FP register from the whole box, so
    // both halves are consumed together via useBox().
    MOZ_ASSERT(unbox->type() == MIRType::Double);
    auto* lir = new (alloc()) LUnboxFloatingPoint(useBox(inner));
    if (unbox->fallible()) {
      // Fallible unboxes bail out via the recorded snapshot.
      assignSnapshot(lir, unbox->bailoutKind());
    }
    define(lir, unbox);
    return;
  }

  // Swap the order we use the box pieces so we can re-use the payload register.
  LUnbox* lir = new (alloc()) LUnbox;
  lir->setOperand(0, usePayloadInRegisterAtStart(inner));
  lir->setOperand(1, useType(inner, LUse::REGISTER));

  if (unbox->fallible()) {
    assignSnapshot(lir, unbox->bailoutKind());
  }

  // Types and payloads form two separate intervals. If the type becomes dead
  // before the payload, it could be used as a Value without the type being
  // recoverable. Unbox's purpose is to eagerly kill the definition of a type
  // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
  // Instead, we create a new virtual register.
  defineReuseInput(lir, unbox, 0);
}
    122 
void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
  // Lower a return of a boxed Value: both halves are pinned to the fixed
  // JS return register pair (type tag and payload).
  MOZ_ASSERT(opd->type() == MIRType::Value);

  LReturn* ins = new (alloc()) LReturn(isGenerator);
  ins->setOperand(0, LUse(JSReturnReg_Type));
  ins->setOperand(1, LUse(JSReturnReg_Data));
  // Attach the operand's vregs to the two fixed uses set up above.
  fillBoxUses(ins, 0, opd);
  add(ins);
}
    132 
void LIRGeneratorARM::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  // An int64 phi is represented as two 32-bit phis (low and high word) at
  // adjacent positions in the block's phi list.
  LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);

  // Allocate two consecutive vregs so the pair can be addressed as
  // vreg + INT64LOW_INDEX / vreg + INT64HIGH_INDEX.
  uint32_t lowVreg = getVirtualRegister();

  // The MIR node records the first (low-word) vreg of the pair.
  phi->setVirtualRegister(lowVreg);

  uint32_t highVreg = getVirtualRegister();
  MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);

  low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
  high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
  annotate(high);
  annotate(low);
}
    149 
void LIRGeneratorARM::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                         LBlock* block, size_t lirIndex) {
  // Wire one predecessor's operand into the low/high 32-bit phi pair of an
  // int64 phi. The operand's words live at consecutive vregs starting at
  // its recorded virtual register; ANY lets the allocator pick register or
  // stack for each half.
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
  low->setOperand(inputPosition,
                  LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
  high->setOperand(
      inputPosition,
      LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
}
    161 
    162 // x = !y
    163 void LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
    164                                  MDefinition* mir, MDefinition* input) {
    165  // Unary ALU operations don't read the input after writing to the output, even
    166  // for fallible operations, so we can use at-start allocations.
    167  ins->setOperand(0, useRegisterAtStart(input));
    168  define(ins, mir);
    169 }
    170 
    171 // z = x+y
    172 void LIRGeneratorARM::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
    173                                  MDefinition* mir, MDefinition* lhs,
    174                                  MDefinition* rhs) {
    175  // Binary ALU operations don't read any input after writing to the output,
    176  // even for fallible operations, so we can use at-start allocations.
    177  ins->setOperand(0, useRegisterAtStart(lhs));
    178  ins->setOperand(1, useRegisterOrConstantAtStart(rhs));
    179  define(ins, mir);
    180 }
    181 
void LIRGeneratorARM::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
    MDefinition* input) {
  // Unary int64 ALU op: output shares the input's register pair.
  // Reuse the input.  Define + use-at-start would create risk that the output
  // uses the same register pair as the input but in reverse order.  Reusing
  // probably has less spilling than the alternative, define + use.
  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
  defineInt64ReuseInput(ins, mir, 0);
}
    191 
void LIRGeneratorARM::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  // Binary int64 ALU op: the lhs register pair is reused for the output
  // (avoiding a cross-pair shuffle); rhs may be a constant.
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64RegisterOrConstant(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}
    199 
void LIRGeneratorARM::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                       MDefinition* lhs, MDefinition* rhs) {
  // A general 64x64-bit multiply needs a scratch register; it can be
  // skipped only for the constant rhs values that codegen special-cases.
  bool needsTemp = true;

  if (rhs->isConstant()) {
    int64_t constant = rhs->toConstant()->toInt64();
    int32_t shift = mozilla::FloorLog2(constant);
    // See special cases in CodeGeneratorARM::visitMulI64
    if (constant >= -1 && constant <= 2) {
      // -1, 0, 1 and 2 are handled without the general multiply path.
      needsTemp = false;
    }
    if (constant > 0 && int64_t(1) << shift == constant) {
      // Positive powers of two are handled specially as well.
      needsTemp = false;
    }
  }

  ins->setLhs(useInt64RegisterAtStart(lhs));
  ins->setRhs(useInt64RegisterOrConstant(rhs));
  if (needsTemp) {
    ins->setTemp0(temp());
  }

  // The result reuses the lhs register pair.
  defineInt64ReuseInput(ins, mir, 0);
}
    224 
    225 void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
    226                                  MDefinition* mir, MDefinition* input) {
    227  ins->setOperand(0, useRegisterAtStart(input));
    228  define(ins, mir);
    229 }
    230 
    231 void LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
    232                                  MDefinition* mir, MDefinition* lhs,
    233                                  MDefinition* rhs) {
    234  ins->setOperand(0, useRegisterAtStart(lhs));
    235  ins->setOperand(1, useRegisterAtStart(rhs));
    236  define(ins, mir);
    237 }
    238 
void LIRGeneratorARM::lowerWasmBuiltinTruncateToInt32(
    MWasmBuiltinTruncateToInt32* ins) {
  // Wasm float->int32 truncation that may go through a builtin: the
  // instance pointer is pinned to InstanceReg for that call. No extra temp
  // is needed on ARM (bogus temp).
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  if (opd->type() == MIRType::Double) {
    define(new (alloc()) LWasmBuiltinTruncateDToInt32(
               useRegister(opd), useFixedAtStart(ins->instance(), InstanceReg),
               LDefinition::BogusTemp()),
           ins);
    return;
  }

  define(new (alloc()) LWasmBuiltinTruncateFToInt32(
             useRegister(opd), useFixedAtStart(ins->instance(), InstanceReg),
             LDefinition::BogusTemp()),
         ins);
}
    257 
void LIRGeneratorARM::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
                                           LBlock* block, size_t lirIndex) {
  // A Value phi is split into a type-tag phi and a payload phi; connect one
  // predecessor's operand to both halves. ANY lets the allocator satisfy
  // each half with a register or stack slot.
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
  LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
  type->setOperand(
      inputPosition,
      LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
  payload->setOperand(inputPosition,
                      LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
}
    269 
void LIRGeneratorARM::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
                                    MDefinition* mir, MDefinition* lhs,
                                    MDefinition* rhs) {
  // Shifts have no special operand constraints on ARM (the count may be any
  // register or a constant), so the generic binary-ALU lowering applies.
  lowerForALU(ins, mir, lhs, rhs);
}
    275 
template <class LInstr>
void LIRGeneratorARM::lowerForShiftInt64(LInstr* ins, MDefinition* mir,
                                         MDefinition* lhs, MDefinition* rhs) {
  // Shared lowering for 64-bit shifts (LShiftI64) and rotates (LRotateI64).
  LAllocation rhsAlloc;
  if (rhs->isConstant()) {
    rhsAlloc = useOrConstant(rhs);
  } else {
    // The operands are int64, but we only care about the lower 32 bits of the
    // RHS. The code below will load that part and will discard the upper half.
    rhsAlloc = useLowWordRegister(rhs);
  }

  if constexpr (std::is_same_v<LInstr, LShiftI64>) {
    ins->setLhs(useInt64RegisterAtStart(lhs));
    ins->setRhs(rhsAlloc);
    // The result reuses the lhs register pair.
    defineInt64ReuseInput(ins, mir, LShiftI64::LhsIndex);
  } else {
    ins->setInput(useInt64RegisterAtStart(lhs));
    ins->setCount(rhsAlloc);
    if (!rhs->isConstant()) {
      // A variable rotate count needs a scratch register in codegen.
      ins->setTemp0(temp());
    }
    defineInt64ReuseInput(ins, mir, LRotateI64::InputIndex);
  }
}

// Explicit instantiations for the two instruction kinds handled above.
template void LIRGeneratorARM::lowerForShiftInt64(LShiftI64* ins,
                                                  MDefinition* mir,
                                                  MDefinition* lhs,
                                                  MDefinition* rhs);
template void LIRGeneratorARM::lowerForShiftInt64(LRotateI64* ins,
                                                  MDefinition* mir,
                                                  MDefinition* lhs,
                                                  MDefinition* rhs);
    310 
void LIRGeneratorARM::lowerDivI(MDiv* div) {
  // Division instructions are slow. Division by constant denominators can be
  // rewritten to use other instructions.
  if (div->rhs()->isConstant()) {
    int32_t rhs = div->rhs()->toConstant()->toInt32();
    // Check for division by a positive power of two, which is an easy and
    // important case to optimize. Note that other optimizations are also
    // possible; division by negative powers of two can be optimized in a
    // similar manner as positive powers of two, and division by other
    // constants can be optimized by a reciprocal multiplication technique.
    int32_t shift = FloorLog2(rhs);
    if (rhs > 0 && 1 << shift == rhs) {
      LDivPowTwoI* lir =
          new (alloc()) LDivPowTwoI(useRegisterAtStart(div->lhs()), shift);
      if (div->fallible()) {
        assignSnapshot(lir, div->bailoutKind());
      }
      define(lir, div);
      return;
    }
  }

  if (ARMFlags::HasIDIV()) {
    // Hardware integer division is available; a temp is still required.
    LDivI* lir = new (alloc())
        LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
    if (div->fallible()) {
      assignSnapshot(lir, div->bailoutKind());
    }
    define(lir, div);
    return;
  }

  // No hardware divider: division is done by an out-of-line routine with
  // the operands pinned to r0/r1; the quotient comes back in the return
  // register (hence defineReturn).
  LSoftDivI* lir = new (alloc()) LSoftDivI(useFixedAtStart(div->lhs(), r0),
                                           useFixedAtStart(div->rhs(), r1));

  if (div->fallible()) {
    assignSnapshot(lir, div->bailoutKind());
  }

  defineReturn(lir, div);
}
    352 
void LIRGeneratorARM::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs) {
  // Int32 multiply. A snapshot is needed whenever the multiply can bail.
  LMulI* lir = new (alloc()) LMulI;
  if (mul->fallible()) {
    assignSnapshot(lir, mul->bailoutKind());
  }

  // Negative zero check reads |lhs| and |rhs| after writing to the output, so
  // we can't use at-start allocations.
  if (mul->canBeNegativeZero() && !rhs->isConstant()) {
    lir->setOperand(0, useRegister(lhs));
    lir->setOperand(1, useRegister(rhs));
    define(lir, mul);
    return;
  }

  // Otherwise the generic binary-ALU lowering (at-start uses) is fine.
  lowerForALU(lir, mul, lhs, rhs);
}
    370 
void LIRGeneratorARM::lowerModI(MMod* mod) {
  if (mod->rhs()->isConstant()) {
    int32_t rhs = mod->rhs()->toConstant()->toInt32();
    int32_t shift = FloorLog2(rhs);
    // x % 2^k can be computed from the low k bits of x.
    if (rhs > 0 && 1 << shift == rhs) {
      LModPowTwoI* lir =
          new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
      if (mod->fallible()) {
        assignSnapshot(lir, mod->bailoutKind());
      }
      define(lir, mod);
      return;
    }
    // A divisor of the form 2^k - 1 has a dedicated masking lowering too.
    if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
      MOZ_ASSERT(rhs);
      LModMaskI* lir = new (alloc())
          LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift + 1);
      if (mod->fallible()) {
        assignSnapshot(lir, mod->bailoutKind());
      }
      define(lir, mod);
      return;
    }
  }

  if (ARMFlags::HasIDIV()) {
    // Hardware division is available; codegen derives the remainder from it
    // (ARM has no direct modulo instruction).
    LModI* lir =
        new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()));
    if (mod->fallible()) {
      assignSnapshot(lir, mod->bailoutKind());
    }
    define(lir, mod);
    return;
  }

  // The temp register must be preserved across a call to __aeabi_idivmod
  MOZ_ASSERT(!GeneralRegisterSet(Registers::VolatileMask).hasRegisterIndex(r4));
  LSoftModI* lir =
      new (alloc()) LSoftModI(useFixedAtStart(mod->lhs(), r0),
                              useFixedAtStart(mod->rhs(), r1), tempFixed(r4));

  if (mod->fallible()) {
    assignSnapshot(lir, mod->bailoutKind());
  }

  defineReturn(lir, mod);
}
    418 
    419 void LIRGeneratorARM::lowerDivI64(MDiv* div) {
    420  MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
    421 }
    422 
void LIRGeneratorARM::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
  // 64-bit division via a builtin call: both operands are used at-start as
  // register pairs, the instance is pinned to InstanceReg for the call, and
  // the result arrives in the int64 return registers (defineReturn).
  if (div->isUnsigned()) {
    LUDivOrModI64* lir = new (alloc())
        LUDivOrModI64(useInt64RegisterAtStart(div->lhs()),
                      useInt64RegisterAtStart(div->rhs()),
                      useFixedAtStart(div->instance(), InstanceReg));
    defineReturn(lir, div);
    return;
  }

  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
      useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()),
      useFixedAtStart(div->instance(), InstanceReg));
  defineReturn(lir, div);
}
    438 
    439 void LIRGeneratorARM::lowerModI64(MMod* mod) {
    440  MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
    441 }
    442 
void LIRGeneratorARM::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
  // 64-bit modulo via a builtin call; mirrors lowerWasmBuiltinDivI64:
  // operand pairs at-start, instance pinned to InstanceReg, result in the
  // int64 return registers.
  if (mod->isUnsigned()) {
    LUDivOrModI64* lir = new (alloc())
        LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
                      useInt64RegisterAtStart(mod->rhs()),
                      useFixedAtStart(mod->instance(), InstanceReg));
    defineReturn(lir, mod);
    return;
  }

  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
      useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()),
      useFixedAtStart(mod->instance(), InstanceReg));
  defineReturn(lir, mod);
}
    458 
    459 void LIRGeneratorARM::lowerUDivI64(MDiv* div) {
    460  MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
    461 }
    462 
    463 void LIRGeneratorARM::lowerUModI64(MMod* mod) {
    464  MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
    465 }
    466 
void LIRGeneratorARM::lowerWasmSelectI(MWasmSelect* select) {
  // The output reuses trueExpr's register, so codegen only has to replace
  // it with falseExpr when the condition is false; falseExpr can therefore
  // live anywhere (any), while the condition needs a register for the test.
  auto* lir = new (alloc())
      LWasmSelect(useRegisterAtStart(select->trueExpr()),
                  useAny(select->falseExpr()), useRegister(select->condExpr()));
  defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
}
    473 
void LIRGeneratorARM::lowerWasmSelectI64(MWasmSelect* select) {
  // Int64 variant: the output reuses trueExpr's register pair; falseExpr
  // needs a full register pair here (unlike the 32-bit case's useAny).
  auto* lir = new (alloc()) LWasmSelectI64(
      useInt64RegisterAtStart(select->trueExpr()),
      useInt64Register(select->falseExpr()), useRegister(select->condExpr()));
  defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
}
    480 
    481 LTableSwitch* LIRGeneratorARM::newLTableSwitch(const LAllocation& in,
    482                                               const LDefinition& inputCopy) {
    483  return new (alloc()) LTableSwitch(in, inputCopy, LDefinition::BogusTemp());
    484 }
    485 
    486 LTableSwitchV* LIRGeneratorARM::newLTableSwitchV(const LBoxAllocation& in) {
    487  return new (alloc())
    488      LTableSwitchV(in, temp(), tempDouble(), LDefinition::BogusTemp());
    489 }
    490 
void LIRGeneratorARM::lowerUrshD(MUrsh* mir) {
  // Unsigned right shift whose result is consumed as a double (the shifted
  // value may exceed INT32_MAX). The temp is used by the code generator
  // when producing the result.
  MDefinition* lhs = mir->lhs();
  MDefinition* rhs = mir->rhs();

  MOZ_ASSERT(lhs->type() == MIRType::Int32);
  MOZ_ASSERT(rhs->type() == MIRType::Int32);

  LUrshD* lir = new (alloc())
      LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
  define(lir, mir);
}
    502 
void LIRGeneratorARM::lowerPowOfTwoI(MPow* mir) {
  // pow with a constant power-of-two base; the exponent is dynamic. The
  // operation can still bail out, so a snapshot is always assigned.
  int32_t base = mir->input()->toConstant()->toInt32();
  MDefinition* power = mir->power();

  auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
  assignSnapshot(lir, mir->bailoutKind());
  define(lir, mir);
}
    511 
void LIRGeneratorARM::lowerBigIntPtrLsh(MBigIntPtrLsh* ins) {
  // Pointer-sized BigInt left shift: two temps for the code generator, and
  // a snapshot because the operation can bail out (see bailoutKind).
  auto* lir = new (alloc()) LBigIntPtrLsh(
      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
  assignSnapshot(lir, ins->bailoutKind());
  define(lir, ins);
}
    518 
void LIRGeneratorARM::lowerBigIntPtrRsh(MBigIntPtrRsh* ins) {
  // Pointer-sized BigInt right shift; same shape as lowerBigIntPtrLsh:
  // two codegen temps plus a snapshot for the fallible case.
  auto* lir = new (alloc()) LBigIntPtrRsh(
      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
  assignSnapshot(lir, ins->bailoutKind());
  define(lir, ins);
}
    525 
void LIRGeneratorARM::lowerBigIntPtrDiv(MBigIntPtrDiv* ins) {
  // With hardware division no temps are needed. Without it, division is
  // performed out-of-line: r0/r1 are reserved as fixed temps and the
  // instruction needs a safepoint for that call.
  LDefinition temp1, temp2;
  if (ARMFlags::HasIDIV()) {
    temp1 = LDefinition::BogusTemp();
    temp2 = LDefinition::BogusTemp();
  } else {
    temp1 = tempFixed(r0);
    temp2 = tempFixed(r1);
  }
  auto* lir = new (alloc()) LBigIntPtrDiv(
      useRegister(ins->lhs()), useRegister(ins->rhs()), temp1, temp2);
  assignSnapshot(lir, ins->bailoutKind());
  define(lir, ins);
  if (!ARMFlags::HasIDIV()) {
    assignSafepoint(lir, ins);
  }
}
    543 
void LIRGeneratorARM::lowerBigIntPtrMod(MBigIntPtrMod* ins) {
  // Like lowerBigIntPtrDiv, but the hardware path still needs one general
  // temp. The software path reserves r0/r1 for the out-of-line call and
  // takes a safepoint for it.
  LDefinition temp1, temp2;
  if (ARMFlags::HasIDIV()) {
    temp1 = temp();
    temp2 = LDefinition::BogusTemp();
  } else {
    temp1 = tempFixed(r0);
    temp2 = tempFixed(r1);
  }
  auto* lir = new (alloc()) LBigIntPtrMod(
      useRegister(ins->lhs()), useRegister(ins->rhs()), temp1, temp2);
  // Only division by zero can bail here; other cases are infallible.
  if (ins->canBeDivideByZero()) {
    assignSnapshot(lir, ins->bailoutKind());
  }
  define(lir, ins);
  if (!ARMFlags::HasIDIV()) {
    assignSafepoint(lir, ins);
  }
}
    563 
void LIRGeneratorARM::lowerUDiv(MDiv* div) {
  // Unsigned 32-bit division: hardware divide when available, otherwise an
  // out-of-line routine with operands pinned to r0/r1 and the result in
  // the return register (defineReturn).
  MDefinition* lhs = div->getOperand(0);
  MDefinition* rhs = div->getOperand(1);

  if (ARMFlags::HasIDIV()) {
    auto* lir = new (alloc()) LUDiv(useRegister(lhs), useRegister(rhs));
    if (div->fallible()) {
      assignSnapshot(lir, div->bailoutKind());
    }
    define(lir, div);
    return;
  }

  auto* lir = new (alloc())
      LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1));

  if (div->fallible()) {
    assignSnapshot(lir, div->bailoutKind());
  }

  defineReturn(lir, div);
}
    586 
void LIRGeneratorARM::lowerUMod(MMod* mod) {
  // Unsigned 32-bit modulo; mirrors lowerUDiv, sharing the combined
  // LSoftUDivOrMod instruction on the software path.
  MDefinition* lhs = mod->getOperand(0);
  MDefinition* rhs = mod->getOperand(1);

  if (ARMFlags::HasIDIV()) {
    auto* lir = new (alloc()) LUMod(useRegister(lhs), useRegister(rhs));
    if (mod->fallible()) {
      assignSnapshot(lir, mod->bailoutKind());
    }
    define(lir, mod);
    return;
  }

  auto* lir = new (alloc())
      LSoftUDivOrMod(useFixedAtStart(lhs, r0), useFixedAtStart(rhs, r1));

  if (mod->fallible()) {
    assignSnapshot(lir, mod->bailoutKind());
  }

  defineReturn(lir, mod);
}
    609 
    610 void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
    611  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    612  LWasmUint32ToDouble* lir =
    613      new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
    614  define(lir, ins);
    615 }
    616 
    617 void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
    618  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    619  LWasmUint32ToFloat32* lir =
    620      new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
    621  define(lir, ins);
    622 }
    623 
void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  // The memory base is either an explicit operand or the dedicated HeapReg.
  const LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    // Atomic 64-bit loads produce their result in a fixed register pair
    // (IntArgReg1:IntArgReg0) — a constraint imposed by the code generator.
    auto* lir =
        new (alloc()) LWasmAtomicLoadI64(useRegisterAtStart(base), memoryBase);
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
                                      LAllocation(AnyRegister(IntArgReg0))));
    return;
  }

  LAllocation ptr = useRegisterAtStart(base);

  if (ins->type() == MIRType::Int64) {
    // Codegen may need to modify the pointer (nonzero offset, or addressing
    // the two words of an int64), so give it a scratch copy of |base|.
    LDefinition ptrCopy = LDefinition::BogusTemp();
    if (ins->access().offset32() || ins->access().type() == Scalar::Int64) {
      ptrCopy = tempCopy(base, 0);
    }

    // Likewise a scratch copy of the explicit memory base, when present.
    LDefinition memoryBaseCopy = LDefinition::BogusTemp();
    if (ins->hasMemoryBase()) {
      memoryBaseCopy = tempCopy(ins->memoryBase(), 1);
    }

    auto* lir =
        new (alloc()) LWasmLoadI64(ptr, memoryBase, ptrCopy, memoryBaseCopy);
    defineInt64(lir, ins);
    return;
  }

  // A nonzero offset also requires a modifiable copy of the pointer.
  LDefinition ptrCopy = LDefinition::BogusTemp();
  if (ins->access().offset32()) {
    ptrCopy = tempCopy(base, 0);
  }

  auto* lir = new (alloc()) LWasmLoad(ptr, memoryBase, ptrCopy);
  define(lir, ins);
}
    668 
void LIRGenerator::visitWasmStore(MWasmStore* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  // The memory base is either an explicit operand or the dedicated HeapReg.
  const LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    // Atomic 64-bit stores take the value in a fixed register pair and need
    // a fixed int64 temp pair — constraints imposed by the code generator.
    auto* lir = new (alloc()) LWasmAtomicStoreI64(
        useRegister(base),
        useInt64Fixed(ins->value(), Register64(IntArgReg1, IntArgReg0)),
        memoryBase, tempInt64Fixed(Register64(IntArgReg3, IntArgReg2)));
    add(lir, ins);
    return;
  }

  LAllocation ptr = useRegisterAtStart(base);

  if (ins->access().type() == Scalar::Int64) {
    // Writing both words may require adjusting the pointer, so codegen
    // always gets a modifiable copy of it here.
    LInt64Allocation value = useInt64RegisterAtStart(ins->value());
    LDefinition ptrCopy = tempCopy(base, 0);
    auto* lir = new (alloc()) LWasmStoreI64(ptr, value, memoryBase, ptrCopy);
    add(lir, ins);
    return;
  }

  // A nonzero offset requires a modifiable copy of the pointer.
  LDefinition ptrCopy = LDefinition::BogusTemp();
  if (ins->access().offset32()) {
    ptrCopy = tempCopy(base, 0);
  }

  LAllocation value;
  if (ins->value()->type() != MIRType::Int64) {
    value = useRegisterAtStart(ins->value());
  } else {
    // Narrowing store of an int64 value: only the low word is needed.
    value = useLowWordRegisterAtStart(ins->value());
  }

  auto* lir = new (alloc()) LWasmStore(ptr, value, memoryBase, ptrCopy);
  add(lir, ins);
}
    711 
void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  // For the ARM it is best to keep the 'base' in a register if a bounds check
  // is needed.
  LAllocation baseAlloc;
  LAllocation limitAlloc;

  if (base->isConstant() && !ins->needsBoundsCheck()) {
    // A bounds check is only skipped for a positive index.
    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
    baseAlloc = LAllocation(base->toConstant());
  } else {
    baseAlloc = useRegisterAtStart(base);
    if (ins->needsBoundsCheck()) {
      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
      limitAlloc = useRegisterAtStart(boundsCheckLimit);
    }
  }

  // The trailing allocation slot is deliberately left empty on this path.
  define(new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation()),
         ins);
}
    737 
void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  // Mirror of visitAsmJSLoadHeap: constant positive indices can be encoded
  // directly; otherwise the base (and, if checked, the limit) go in
  // registers.
  LAllocation baseAlloc;
  LAllocation limitAlloc;

  if (base->isConstant() && !ins->needsBoundsCheck()) {
    // A bounds check is only skipped for a positive index.
    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
    baseAlloc = LAllocation(base->toConstant());
  } else {
    baseAlloc = useRegisterAtStart(base);
    if (ins->needsBoundsCheck()) {
      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
      limitAlloc = useRegisterAtStart(boundsCheckLimit);
    }
  }

  // The trailing allocation slot is deliberately left empty on this path.
  add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
                                    limitAlloc, LAllocation()),
      ins);
}
    761 
    762 void LIRGeneratorARM::lowerTruncateDToInt32(MTruncateToInt32* ins) {
    763  MDefinition* opd = ins->input();
    764  MOZ_ASSERT(opd->type() == MIRType::Double);
    765 
    766  define(new (alloc())
    767             LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()),
    768         ins);
    769 }
    770 
    771 void LIRGeneratorARM::lowerTruncateFToInt32(MTruncateToInt32* ins) {
    772  MDefinition* opd = ins->input();
    773  MOZ_ASSERT(opd->type() == MIRType::Float32);
    774 
    775  define(new (alloc())
    776             LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()),
    777         ins);
    778 }
    779 
void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  // Atomic exchange relies on the exclusive load/store instruction family.
  MOZ_ASSERT(ARMFlags::HasLDSTREXBHD());

  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  if (Scalar::isBigIntType(ins->arrayType())) {
    // 64-bit exchange: the new value and the result each live in a fixed
    // register pair. The two register pairs must be distinct.
    LInt64Allocation value = useInt64Fixed(ins->value(), XchgNew64);

    auto* lir = new (alloc())
        LAtomicExchangeTypedArrayElement64(elements, index, value);
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(XchgOutHi)),
                                      LAllocation(AnyRegister(XchgOutLo))));
    return;
  }

  MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);

  const LAllocation value = useRegister(ins->value());

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  LDefinition tempDef = LDefinition::BogusTemp();
  if (ins->arrayType() == Scalar::Uint32) {
    // Uint32 results are returned as Double, hence the extra temp.
    MOZ_ASSERT(ins->type() == MIRType::Double);
    tempDef = temp();
  }

  LAtomicExchangeTypedArrayElement* lir = new (alloc())
      LAtomicExchangeTypedArrayElement(elements, index, value, tempDef);

  define(lir, ins);
}
    821 
void LIRGenerator::visitAtomicTypedArrayElementBinop(
   MAtomicTypedArrayElementBinop* ins) {
 // Lower a read-modify-write atomic (add/sub/and/or/xor) on a typed-array
 // element. Clamped and floating element types are excluded by MIR.
 MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
 MOZ_ASSERT(!Scalar::isFloatingType(ins->arrayType()));

 MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
 MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

 const LUse elements = useRegister(ins->elements());
 const LAllocation index =
     useRegisterOrIndexConstant(ins->index(), ins->arrayType());

 if (Scalar::isBigIntType(ins->arrayType())) {
   // 64-bit binop: the temp is pinned to a fixed register pair.
   // Wasm additionally pins the value register to `FetchOpVal64`, but it's
   // unclear why this was deemed necessary.
   LInt64Allocation value = useInt64Register(ins->value());
   LInt64Definition temp = tempInt64Fixed(FetchOpTmp64);

   if (ins->isForEffect()) {
     // The old value is unused; emit the cheaper for-effect form.
     auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
         elements, index, value, temp);
     add(lir, ins);
     return;
   }

   auto* lir = new (alloc())
       LAtomicTypedArrayElementBinop64(elements, index, value, temp);
   defineInt64Fixed(lir, ins,
                    LInt64Allocation(LAllocation(AnyRegister(FetchOpOutHi)),
                                     LAllocation(AnyRegister(FetchOpOutLo))));
   return;
 }

 const LAllocation value = useRegister(ins->value());

 if (ins->isForEffect()) {
   // The old value is unused; only a flag temp is needed.
   LAtomicTypedArrayElementBinopForEffect* lir = new (alloc())
       LAtomicTypedArrayElementBinopForEffect(elements, index, value,
                                              /* flagTemp= */ temp());
   add(lir, ins);
   return;
 }

 // For a Uint32Array with a known double result we need a temp for
 // the intermediate output.
 //
 // Optimization opportunity (bug 1077317): We can do better by
 // allowing 'value' to remain as an imm32 if it is small enough to
 // fit in an instruction.

 LDefinition flagTemp = temp();
 LDefinition outTemp = LDefinition::BogusTemp();

 if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
   outTemp = temp();
 }

 // On arm, map flagTemp to temp1 and outTemp to temp2, at least for now.

 LAtomicTypedArrayElementBinop* lir = new (alloc())
     LAtomicTypedArrayElementBinop(elements, index, value, flagTemp, outTemp);
 define(lir, ins);
}
    885 
void LIRGenerator::visitCompareExchangeTypedArrayElement(
   MCompareExchangeTypedArrayElement* ins) {
 // Lower a compare-exchange on a typed-array element.
 MOZ_ASSERT(!Scalar::isFloatingType(ins->arrayType()));
 MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
 MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

 const LUse elements = useRegister(ins->elements());
 const LAllocation index =
     useRegisterOrIndexConstant(ins->index(), ins->arrayType());

 if (Scalar::isBigIntType(ins->arrayType())) {
   // 64-bit compare-exchange: oldval, newval and the output are pinned to
   // fixed register pairs. The three register pairs must be distinct.
   LInt64Allocation oldval = useInt64Fixed(ins->oldval(), CmpXchgOld64);
   LInt64Allocation newval = useInt64Fixed(ins->newval(), CmpXchgNew64);

   auto* lir = new (alloc())
       LCompareExchangeTypedArrayElement64(elements, index, oldval, newval);
   defineInt64Fixed(lir, ins,
                    LInt64Allocation(LAllocation(AnyRegister(CmpXchgOutHi)),
                                     LAllocation(AnyRegister(CmpXchgOutLo))));
   return;
 }

 const LAllocation oldval = useRegister(ins->oldval());
 const LAllocation newval = useRegister(ins->newval());

 // If the target is a floating register then we need a temp at the
 // CodeGenerator level for creating the result.
 //
 // Optimization opportunity (bug 1077317): We could do better by
 // allowing oldval to remain an immediate, if it is small enough
 // to fit in an instruction.

 LDefinition tempDef = LDefinition::BogusTemp();
 if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
   tempDef = temp();
 }

 LCompareExchangeTypedArrayElement* lir =
     new (alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval,
                                                     newval, tempDef);

 define(lir, ins);
}
    930 
    931 void LIRGeneratorARM::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
    932  const LUse elements = useRegister(ins->elements());
    933  const LAllocation index =
    934      useRegisterOrIndexConstant(ins->index(), ins->storageType());
    935 
    936  auto* lir = new (alloc()) LAtomicLoad64(elements, index);
    937  defineInt64Fixed(lir, ins,
    938                   LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
    939                                    LAllocation(AnyRegister(IntArgReg0))));
    940 }
    941 
    942 void LIRGeneratorARM::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
    943  LUse elements = useRegister(ins->elements());
    944  LAllocation index =
    945      useRegisterOrIndexConstant(ins->index(), ins->writeType());
    946  LInt64Allocation value =
    947      useInt64Fixed(ins->value(), Register64(IntArgReg1, IntArgReg0));
    948  LInt64Definition temp = tempInt64Fixed(Register64(IntArgReg3, IntArgReg2));
    949 
    950  add(new (alloc()) LAtomicStore64(elements, index, value, temp), ins);
    951 }
    952 
void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
 // Lower a wasm heap compare-exchange. The heap base comes from an explicit
 // memory-base operand when present, otherwise from the dedicated HeapReg.
 MDefinition* base = ins->base();
 MOZ_ASSERT(base->type() == MIRType::Int32);

 const LAllocation memoryBase =
     ins->hasMemoryBase() ? LAllocation(useRegister(ins->memoryBase()))
                          : LGeneralReg(HeapReg);

 if (ins->access().type() == Scalar::Int64) {
   // 64-bit case: old/new values and the output use fixed register pairs.
   // The three register pairs must be distinct.
   auto* lir = new (alloc()) LWasmCompareExchangeI64(
       useRegister(base), useInt64Fixed(ins->oldValue(), CmpXchgOld64),
       useInt64Fixed(ins->newValue(), CmpXchgNew64), memoryBase);
   defineInt64Fixed(lir, ins,
                    LInt64Allocation(LAllocation(AnyRegister(CmpXchgOutHi)),
                                     LAllocation(AnyRegister(CmpXchgOutLo))));
   return;
 }

 MOZ_ASSERT(ins->access().type() < Scalar::Float32);
 MOZ_ASSERT(ARMFlags::HasLDSTREXBHD(), "by HasPlatformSupport() constraints");

 LWasmCompareExchangeHeap* lir = new (alloc())
     LWasmCompareExchangeHeap(useRegister(base), useRegister(ins->oldValue()),
                              useRegister(ins->newValue()), memoryBase);

 define(lir, ins);
}
    981 
void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
 // Lower a wasm heap atomic exchange. The heap base comes from an explicit
 // memory-base operand when present, otherwise from the dedicated HeapReg.
 MOZ_ASSERT(ins->base()->type() == MIRType::Int32);

 const LAllocation memoryBase =
     ins->hasMemoryBase() ? LAllocation(useRegister(ins->memoryBase()))
                          : LGeneralReg(HeapReg);

 if (ins->access().type() == Scalar::Int64) {
   // 64-bit case: the value and the output use fixed register pairs.
   auto* lir = new (alloc()) LWasmAtomicExchangeI64(
       useRegister(ins->base()), useInt64Fixed(ins->value(), XchgNew64),
       memoryBase, ins->access());
   defineInt64Fixed(lir, ins,
                    LInt64Allocation(LAllocation(AnyRegister(XchgOutHi)),
                                     LAllocation(AnyRegister(XchgOutLo))));
   return;
 }

 MOZ_ASSERT(ins->access().type() < Scalar::Float32);
 MOZ_ASSERT(ARMFlags::HasLDSTREXBHD(), "by HasPlatformSupport() constraints");

 const LAllocation base = useRegister(ins->base());
 const LAllocation value = useRegister(ins->value());
 define(new (alloc()) LWasmAtomicExchangeHeap(base, value, memoryBase), ins);
}
   1006 
void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
 // Lower a wasm heap read-modify-write atomic. The heap base comes from an
 // explicit memory-base operand when present, otherwise from HeapReg.
 const LAllocation memoryBase =
     ins->hasMemoryBase() ? LAllocation(useRegister(ins->memoryBase()))
                          : LGeneralReg(HeapReg);

 if (ins->access().type() == Scalar::Int64) {
   // 64-bit case: the value, the temp and the output use fixed register
   // pairs.
   auto* lir = new (alloc()) LWasmAtomicBinopI64(
       useRegister(ins->base()), useInt64Fixed(ins->value(), FetchOpVal64),
       memoryBase, tempInt64Fixed(Register64(FetchOpTmpHi, FetchOpTmpLo)),
       ins->access(), ins->operation());
   defineInt64Fixed(lir, ins,
                    LInt64Allocation(LAllocation(AnyRegister(FetchOpOutHi)),
                                     LAllocation(AnyRegister(FetchOpOutLo))));
   return;
 }

 MOZ_ASSERT(ins->access().type() < Scalar::Float32);
 MOZ_ASSERT(ARMFlags::HasLDSTREXBHD(), "by HasPlatformSupport() constraints");

 MDefinition* base = ins->base();
 MOZ_ASSERT(base->type() == MIRType::Int32);

 if (!ins->hasUses()) {
   // The old value is unused; emit the cheaper for-effect form.
   auto* lir = new (alloc()) LWasmAtomicBinopHeapForEffect(
       useRegister(base), useRegister(ins->value()), memoryBase, temp());
   add(lir, ins);
   return;
 }

 auto* lir = new (alloc()) LWasmAtomicBinopHeap(
     useRegister(base), useRegister(ins->value()), memoryBase, temp());
 define(lir, ins);
}
   1040 
   1041 void LIRGenerator::visitSubstr(MSubstr* ins) {
   1042  LSubstr* lir = new (alloc())
   1043      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
   1044              useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
   1045  define(lir, ins);
   1046  assignSafepoint(lir, ins);
   1047 }
   1048 
void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
 // ARM lowers this via MWasmBuiltinTruncateToInt64 (a builtin call) instead.
 MOZ_CRASH("We don't use MWasmTruncateToInt64 for arm");
}
   1052 
   1053 void LIRGeneratorARM::lowerWasmBuiltinTruncateToInt64(
   1054    MWasmBuiltinTruncateToInt64* ins) {
   1055  MDefinition* opd = ins->input();
   1056  MDefinition* instance = ins->instance();
   1057  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
   1058 
   1059  defineReturn(new (alloc())
   1060                   LWasmTruncateToInt64(useRegisterAtStart(opd),
   1061                                        useFixedAtStart(instance, InstanceReg)),
   1062               ins);
   1063 }
   1064 
void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
 // ARM lowers this via lowerBuiltinInt64ToFloatingPoint instead.
 MOZ_CRASH("We use BuiltinInt64ToFloatingPoint instead.");
}
   1068 
   1069 void LIRGeneratorARM::lowerBuiltinInt64ToFloatingPoint(
   1070    MBuiltinInt64ToFloatingPoint* ins) {
   1071  MOZ_ASSERT(ins->type() == MIRType::Double || ins->type() == MIRType::Float32);
   1072 
   1073  auto* lir = new (alloc())
   1074      LInt64ToFloatingPointCall(useInt64RegisterAtStart(ins->input()),
   1075                                useFixedAtStart(ins->instance(), InstanceReg));
   1076  defineReturn(lir, ins);
   1077 }
   1078 
   1079 void LIRGenerator::visitCopySign(MCopySign* ins) {
   1080  MDefinition* lhs = ins->lhs();
   1081  MDefinition* rhs = ins->rhs();
   1082 
   1083  MOZ_ASSERT(IsFloatingPointType(lhs->type()));
   1084  MOZ_ASSERT(lhs->type() == rhs->type());
   1085  MOZ_ASSERT(lhs->type() == ins->type());
   1086 
   1087  LInstructionHelper<1, 2, 0>* lir;
   1088  if (lhs->type() == MIRType::Double) {
   1089    lir = new (alloc()) LCopySignD();
   1090  } else {
   1091    lir = new (alloc()) LCopySignF();
   1092  }
   1093 
   1094  lowerForFPU(lir, ins, lhs, rhs);
   1095 }
   1096 
void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
 auto* lir =
     new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input()));
 defineInt64(lir, ins);

 // Replace def 0 with a must-reuse-input definition so the register
 // allocator reuses the input register for that half of the int64 result.
 LDefinition def(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
 def.setReusedInput(0);
 def.setVirtualRegister(ins->virtualRegister());

 lir->setDef(0, def);
}
   1108 
   1109 void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
   1110  defineInt64(new (alloc())
   1111                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
   1112              ins);
   1113 }
   1114 
   1115 // On arm we specialize the only cases where compare is {U,}Int32 and select
   1116 // is {U,}Int32.
   1117 bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
   1118    MCompare::CompareType compTy, MIRType insTy) {
   1119  return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
   1120                                     compTy == MCompare::Compare_UInt32);
   1121 }
   1122 
   1123 void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
   1124                                                   MDefinition* lhs,
   1125                                                   MDefinition* rhs,
   1126                                                   MCompare::CompareType compTy,
   1127                                                   JSOp jsop) {
   1128  MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
   1129  auto* lir = new (alloc()) LWasmCompareAndSelect(
   1130      useRegister(lhs), useRegister(rhs), useRegisterAtStart(ins->trueExpr()),
   1131      useRegister(ins->falseExpr()), compTy, jsop);
   1132  defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
   1133 }
   1134 
void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("ternary SIMD NYI");
}
   1138 
void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("binary SIMD NYI");
}
   1142 
#ifdef ENABLE_WASM_SIMD
// SIMD is not implemented on ARM, so none of these specializations apply.
bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
   int8_t shuffle[16]) {
 return false;
}
bool MWasmTernarySimd128::canRelaxBitselect() { return false; }

bool MWasmBinarySimd128::canPmaddubsw() { return false; }
#endif
   1152 
bool MWasmBinarySimd128::specializeForConstantRhs() {
 // No constant-RHS specializations are implemented on ARM yet.
 // Probably many we want to do here
 return false;
}
   1157 
void LIRGenerator::visitWasmBinarySimd128WithConstant(
   MWasmBinarySimd128WithConstant* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("binary SIMD with constant NYI");
}
   1162 
void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("shift SIMD NYI");
}
   1166 
void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("shuffle SIMD NYI");
}
   1170 
void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("replace-lane SIMD NYI");
}
   1174 
void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("scalar-to-SIMD NYI");
}
   1178 
void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("unary SIMD NYI");
}
   1182 
void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("reduce-SIMD NYI");
}
   1186 
void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("load-lane SIMD NYI");
}
   1190 
void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
 // Wasm SIMD is not yet implemented for ARM.
 MOZ_CRASH("store-lane SIMD NYI");
}