tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Lowering-riscv64.cpp (34003B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/riscv64/Lowering-riscv64.h"
      8 
      9 #include "mozilla/MathAlgorithms.h"
     10 
     11 #include "jit/Lowering.h"
     12 #include "jit/MIR-wasm.h"
     13 #include "jit/MIR.h"
     14 #include "jit/riscv64/Assembler-riscv64.h"
     15 
     16 #include "jit/shared/Lowering-shared-inl.h"
     17 
     18 using namespace js;
     19 using namespace js::jit;
     20 
// Creates the LIR node for a table switch on an integer input. |in| is the
// switch input and |inputCopy| a modifiable copy of it; one extra temp is
// requested for the jump-target computation.
LTableSwitch* LIRGeneratorRiscv64::newLTableSwitch(
    const LAllocation& in, const LDefinition& inputCopy) {
  return new (alloc()) LTableSwitch(in, inputCopy, temp());
}
     25 
// Creates the LIR node for a table switch on a boxed Value input. Requires an
// integer temp, a double temp (for unboxing a numeric payload), and a second
// integer temp.
LTableSwitchV* LIRGeneratorRiscv64::newLTableSwitchV(const LBoxAllocation& in) {
  return new (alloc()) LTableSwitchV(in, temp(), tempDouble(), temp());
}
     29 
// 32-bit shifts use the same operand policies as two-operand ALU operations
// on this target, so simply defer to lowerForALU.
void LIRGeneratorRiscv64::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
                                        MDefinition* mir, MDefinition* lhs,
                                        MDefinition* rhs) {
  lowerForALU(ins, mir, lhs, rhs);
}
     35 
// Lowers a 64-bit shift or rotate. LShiftI64 and LRotateI64 name their
// operands differently (lhs/rhs vs. input/count), hence the compile-time
// dispatch on the LIR node type. In both cases the value operand is an Int64
// register used at start and the count may be a register or a constant.
template <class LInstr>
void LIRGeneratorRiscv64::lowerForShiftInt64(LInstr* ins, MDefinition* mir,
                                             MDefinition* lhs,
                                             MDefinition* rhs) {
  if constexpr (std::is_same_v<LInstr, LShiftI64>) {
    ins->setLhs(useInt64RegisterAtStart(lhs));
    ins->setRhs(useRegisterOrConstantAtStart(rhs));
  } else {
    ins->setInput(useInt64RegisterAtStart(lhs));
    ins->setCount(useRegisterOrConstantAtStart(rhs));
  }
  defineInt64(ins, mir);
}
     49 
// Explicit instantiations for the two LIR node types this helper is used
// with; the template definition lives in this translation unit only.
template void LIRGeneratorRiscv64::lowerForShiftInt64(LShiftI64* ins,
                                                      MDefinition* mir,
                                                      MDefinition* lhs,
                                                      MDefinition* rhs);
template void LIRGeneratorRiscv64::lowerForShiftInt64(LRotateI64* ins,
                                                      MDefinition* mir,
                                                      MDefinition* lhs,
                                                      MDefinition* rhs);
     58 
// x = !y
// Lowers a single-input ALU operation: the operand is a register used
// at-start and the result gets its own (possibly aliasing) definition.
void LIRGeneratorRiscv64::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
                                      MDefinition* mir, MDefinition* input) {
  // Unary ALU operations don't read the input after writing to the output, even
  // for fallible operations, so we can use at-start allocations.
  ins->setOperand(0, useRegisterAtStart(input));
  define(ins, mir);
}
     67 
// z = x + y
// Lowers a two-input ALU operation; the right-hand side may be encoded as an
// immediate when it is a suitable constant.
void LIRGeneratorRiscv64::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
                                      MDefinition* mir, MDefinition* lhs,
                                      MDefinition* rhs) {
  // Binary ALU operations don't read any input after writing to the output,
  // even for fallible operations, so we can use at-start allocations.
  ins->setOperand(0, useRegisterAtStart(lhs));
  ins->setOperand(1, useRegisterOrConstantAtStart(rhs));
  define(ins, mir);
}
     78 
// Lowers a single-input 64-bit ALU operation; mirrors the 32-bit unary case
// but with Int64 operand/definition plumbing.
void LIRGeneratorRiscv64::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
    MDefinition* input) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
  defineInt64(ins, mir);
}
     85 
// Lowers a two-input 64-bit ALU operation. The rhs starts at operand index
// INT64_PIECES and may be a constant when encodable.
void LIRGeneratorRiscv64::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64RegisterOrConstantAtStart(rhs));
  defineInt64(ins, mir);
}
     93 
// 64-bit multiplication needs no special operand handling on riscv64; it is
// lowered exactly like any other binary Int64 ALU operation.
void LIRGeneratorRiscv64::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                           MDefinition* lhs, MDefinition* rhs) {
  lowerForALUInt64(ins, mir, lhs, rhs);
}
     98 
// Lowers a single-input floating-point operation; the input is used at-start.
void LIRGeneratorRiscv64::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
                                      MDefinition* mir, MDefinition* input) {
  ins->setOperand(0, useRegisterAtStart(input));
  define(ins, mir);
}
    104 
// Lowers a two-input floating-point operation. Unlike the ALU case, the rhs
// is always a register: FPU instructions here take no immediates.
void LIRGeneratorRiscv64::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
                                      MDefinition* mir, MDefinition* lhs,
                                      MDefinition* rhs) {
  ins->setOperand(0, useRegisterAtStart(lhs));
  ins->setOperand(1, useRegisterAtStart(rhs));
  define(ins, mir);
}
    112 
// Creates a box allocation pinned to a fixed register. Only |reg1| is used:
// on this 64-bit target a boxed Value occupies a single register, so |reg2|
// is ignored (it exists for the 32-bit nunbox ABI of this interface).
LBoxAllocation LIRGeneratorRiscv64::useBoxFixed(MDefinition* mir, Register reg1,
                                                Register reg2,
                                                bool useAtStart) {
  MOZ_ASSERT(mir->type() == MIRType::Value);

  ensureDefined(mir);
  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
}
    121 
// riscv64 places no special register constraints on byte operations, so any
// general-purpose register works.
LAllocation LIRGeneratorRiscv64::useByteOpRegister(MDefinition* mir) {
  return useRegister(mir);
}
    125 
// As useByteOpRegister, but with an at-start use policy.
LAllocation LIRGeneratorRiscv64::useByteOpRegisterAtStart(MDefinition* mir) {
  return useRegisterAtStart(mir);
}
    129 
// As useByteOpRegister, but also allows a non-double constant allocation.
LAllocation LIRGeneratorRiscv64::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useRegisterOrNonDoubleConstant(mir);
}
    134 
// Any GPR can serve as a byte-op temp or as the temp used when unboxing.
LDefinition LIRGeneratorRiscv64::tempByteOpRegister() { return temp(); }
LDefinition LIRGeneratorRiscv64::tempToUnbox() { return temp(); }
    137 
// Value-typed phi inputs need no special handling on this target: a boxed
// Value fits in one LIR piece, so the typed-phi path applies.
void LIRGeneratorRiscv64::lowerUntypedPhiInput(MPhi* phi,
                                               uint32_t inputPosition,
                                               LBlock* block, size_t lirIndex) {
  lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}
// Int64 phi inputs are likewise single-piece on a 64-bit target.
void LIRGeneratorRiscv64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                             LBlock* block, size_t lirIndex) {
  lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}
// An Int64 phi defines a single virtual register, same as any typed phi.
void LIRGeneratorRiscv64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  defineTypedPhi(phi, lirIndex);
}
    150 
// Lowers a 32-bit multiplication. Fallible multiplications (overflow or
// negative-zero checks) get a bailout snapshot attached.
void LIRGeneratorRiscv64::lowerMulI(MMul* mul, MDefinition* lhs,
                                    MDefinition* rhs) {
  LMulI* lir = new (alloc()) LMulI;
  if (mul->fallible()) {
    assignSnapshot(lir, mul->bailoutKind());
  }

  // Negative zero check reads |lhs| and |rhs| after writing to the output, so
  // we can't use at-start allocations.
  if (mul->canBeNegativeZero() && !rhs->isConstant()) {
    lir->setOperand(0, useRegister(lhs));
    lir->setOperand(1, useRegister(rhs));
    define(lir, mul);
    return;
  }

  // Otherwise the standard at-start ALU policies apply.
  lowerForALU(lir, mul, lhs, rhs);
}
    169 
// Lowers a 32-bit signed division, specializing division by a positive
// power-of-two constant into a shift-based LIR node.
void LIRGeneratorRiscv64::lowerDivI(MDiv* div) {
  // Division instructions are slow. Division by constant denominators can be
  // rewritten to use other instructions.
  if (div->rhs()->isConstant()) {
    int32_t rhs = div->rhs()->toConstant()->toInt32();
    // Check for division by a positive power of two, which is an easy and
    // important case to optimize. Note that other optimizations are also
    // possible; division by negative powers of two can be optimized in a
    // similar manner as positive powers of two, and division by other
    // constants can be optimized by a reciprocal multiplication technique.
    if (rhs > 0 && mozilla::IsPowerOfTwo(mozilla::Abs(rhs))) {
      int32_t shift = mozilla::FloorLog2(rhs);
      auto* lir =
          new (alloc()) LDivPowTwoI(useRegisterAtStart(div->lhs()), shift);
      if (div->fallible()) {
        assignSnapshot(lir, div->bailoutKind());
      }
      define(lir, div);
      return;
    }
  }

  // When the remainder is observable the inputs are presumably read again
  // after the quotient is produced, so at-start allocations are not safe.
  LAllocation lhs, rhs;
  if (!div->canTruncateRemainder()) {
    lhs = useRegister(div->lhs());
    rhs = useRegister(div->rhs());
  } else {
    lhs = useRegisterAtStart(div->lhs());
    rhs = useRegisterAtStart(div->rhs());
  }

  // RISCV64 has plenty of scratch registers, so we don't need to request an
  // additional temp register from the register allocator.
  auto* lir = new (alloc()) LDivI(lhs, rhs, LDefinition::BogusTemp());
  if (div->fallible()) {
    assignSnapshot(lir, div->bailoutKind());
  }
  define(lir, div);
}
    209 
    210 void LIRGeneratorRiscv64::lowerDivI64(MDiv* div) {
    211  auto* lir = new (alloc())
    212      LDivI64(useRegisterAtStart(div->lhs()), useRegisterAtStart(div->rhs()));
    213  defineInt64(lir, div);
    214 }
    215 
// Lowers a 32-bit signed modulus, specializing two constant-rhs cases:
// rhs == 2^k (bit mask) and rhs == 2^k - 1 (mask-based reduction).
void LIRGeneratorRiscv64::lowerModI(MMod* mod) {
  if (mod->rhs()->isConstant()) {
    int32_t rhs = mod->rhs()->toConstant()->toInt32();
    int32_t shift = mozilla::FloorLog2(rhs);
    // x % 2^k can be computed by masking the low k bits.
    if (rhs > 0 && 1 << shift == rhs) {
      LModPowTwoI* lir =
          new (alloc()) LModPowTwoI(useRegisterAtStart(mod->lhs()), shift);
      if (mod->fallible()) {
        assignSnapshot(lir, mod->bailoutKind());
      }
      define(lir, mod);
      return;
    }
    // x % (2^(k+1) - 1) uses a mask-based reduction needing two temps.
    if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
      LModMaskI* lir = new (alloc())
          LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift + 1);
      if (mod->fallible()) {
        assignSnapshot(lir, mod->bailoutKind());
      }
      define(lir, mod);
      return;
    }
  }

  // A possibly-negative, non-truncated dividend keeps the inputs alive past
  // the output write, so at-start allocations are avoided in that case.
  LAllocation lhs, rhs;
  if (mod->canBeNegativeDividend() && !mod->isTruncated()) {
    lhs = useRegister(mod->lhs());
    rhs = useRegister(mod->rhs());
  } else {
    lhs = useRegisterAtStart(mod->lhs());
    rhs = useRegisterAtStart(mod->rhs());
  }

  auto* lir = new (alloc()) LModI(lhs, rhs);
  if (mod->fallible()) {
    assignSnapshot(lir, mod->bailoutKind());
  }
  define(lir, mod);
}
    255 
    256 void LIRGeneratorRiscv64::lowerModI64(MMod* mod) {
    257  auto* lir = new (alloc())
    258      LModI64(useRegisterAtStart(mod->lhs()), useRegisterAtStart(mod->rhs()));
    259  defineInt64(lir, mod);
    260 }
    261 
// Lowers a 32-bit unsigned division. As in lowerDivI, an observable
// remainder forces non-at-start uses of the inputs.
void LIRGeneratorRiscv64::lowerUDiv(MDiv* div) {
  LAllocation lhs, rhs;
  if (!div->canTruncateRemainder()) {
    lhs = useRegister(div->lhs());
    rhs = useRegister(div->rhs());
  } else {
    lhs = useRegisterAtStart(div->lhs());
    rhs = useRegisterAtStart(div->rhs());
  }

  auto* lir = new (alloc()) LUDiv(lhs, rhs);
  if (div->fallible()) {
    assignSnapshot(lir, div->bailoutKind());
  }
  define(lir, div);
}
    278 
    279 void LIRGeneratorRiscv64::lowerUDivI64(MDiv* div) {
    280  auto* lir = new (alloc())
    281      LUDivI64(useRegisterAtStart(div->lhs()), useRegisterAtStart(div->rhs()));
    282  defineInt64(lir, div);
    283 }
    284 
    285 void LIRGeneratorRiscv64::lowerUMod(MMod* mod) {
    286  auto* lir = new (alloc())
    287      LUMod(useRegisterAtStart(mod->lhs()), useRegisterAtStart(mod->rhs()));
    288  if (mod->fallible()) {
    289    assignSnapshot(lir, mod->bailoutKind());
    290  }
    291  define(lir, mod);
    292 }
    293 
    294 void LIRGeneratorRiscv64::lowerUModI64(MMod* mod) {
    295  auto* lir = new (alloc())
    296      LUModI64(useRegisterAtStart(mod->lhs()), useRegisterAtStart(mod->rhs()));
    297  defineInt64(lir, mod);
    298 }
    299 
    300 void LIRGeneratorRiscv64::lowerUrshD(MUrsh* mir) {
    301  MDefinition* lhs = mir->lhs();
    302  MDefinition* rhs = mir->rhs();
    303 
    304  MOZ_ASSERT(lhs->type() == MIRType::Int32);
    305  MOZ_ASSERT(rhs->type() == MIRType::Int32);
    306 
    307  auto* lir = new (alloc()) LUrshD(useRegisterAtStart(lhs),
    308                                   useRegisterOrConstantAtStart(rhs), temp());
    309  define(lir, mir);
    310 }
    311 
    312 void LIRGeneratorRiscv64::lowerPowOfTwoI(MPow* mir) {
    313  int32_t base = mir->input()->toConstant()->toInt32();
    314  MDefinition* power = mir->power();
    315 
    316  auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
    317  assignSnapshot(lir, mir->bailoutKind());
    318  define(lir, mir);
    319 }
    320 
    321 void LIRGeneratorRiscv64::lowerTruncateDToInt32(MTruncateToInt32* ins) {
    322  MDefinition* opd = ins->input();
    323  MOZ_ASSERT(opd->type() == MIRType::Double);
    324 
    325  define(new (alloc()) LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
    326 }
    327 
    328 void LIRGeneratorRiscv64::lowerTruncateFToInt32(MTruncateToInt32* ins) {
    329  MDefinition* opd = ins->input();
    330  MOZ_ASSERT(opd->type() == MIRType::Float32);
    331 
    332  define(new (alloc()) LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);
    333 }
    334 
// riscv64 converts Int64 to floating point inline, so the runtime-builtin
// path is never taken on this architecture.
void LIRGeneratorRiscv64::lowerBuiltinInt64ToFloatingPoint(
    MBuiltinInt64ToFloatingPoint* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}
    339 
    340 void LIRGeneratorRiscv64::lowerWasmSelectI(MWasmSelect* select) {
    341  auto* lir = new (alloc())
    342      LWasmSelect(useRegisterAtStart(select->trueExpr()),
    343                  useAny(select->falseExpr()), useRegister(select->condExpr()));
    344  defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
    345 }
    346 
    347 void LIRGeneratorRiscv64::lowerWasmSelectI64(MWasmSelect* select) {
    348  auto* lir = new (alloc()) LWasmSelectI64(
    349      useInt64RegisterAtStart(select->trueExpr()),
    350      useInt64(select->falseExpr()), useRegister(select->condExpr()));
    351  defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
    352 }
    353 
    354 // On riscv we specialize the only cases where compare is {U,}Int32 and select
    355 // is {U,}Int32.
    356 bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
    357    MCompare::CompareType compTy, MIRType insTy) {
    358  return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
    359                                     compTy == MCompare::Compare_UInt32);
    360 }
    361 
// Lowers a wasm select whose condition is a fused compare (see
// canSpecializeWasmCompareAndSelect). The result reuses the true-expression
// register; compare operands and the false expression stay live across it.
void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
                                                   MDefinition* lhs,
                                                   MDefinition* rhs,
                                                   MCompare::CompareType compTy,
                                                   JSOp jsop) {
  MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
  auto* lir = new (alloc()) LWasmCompareAndSelect(
      useRegister(lhs), useRegister(rhs), useRegisterAtStart(ins->trueExpr()),
      useRegister(ins->falseExpr()), compTy, jsop);
  defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
}
    373 
// Lowers the wasm builtin-call fallback for float/double -> int32 truncation.
// The Double variant carries the instance in InstanceReg; the Float32 variant
// passes an empty allocation in that slot.
void LIRGeneratorRiscv64::lowerWasmBuiltinTruncateToInt32(
    MWasmBuiltinTruncateToInt32* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  if (opd->type() == MIRType::Double) {
    define(new (alloc()) LWasmBuiltinTruncateDToInt32(
               useRegister(opd), useFixed(ins->instance(), InstanceReg),
               LDefinition::BogusTemp()),
           ins);
    return;
  }

  define(new (alloc()) LWasmBuiltinTruncateFToInt32(
             useRegister(opd), LAllocation(), LDefinition::BogusTemp()),
         ins);
}
    391 
// riscv64 truncates to Int64 inline; the builtin-call path is unreachable.
void LIRGeneratorRiscv64::lowerWasmBuiltinTruncateToInt64(
    MWasmBuiltinTruncateToInt64* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}
    396 
// riscv64 has a hardware 64-bit divide, so the runtime-call path is unused.
void LIRGeneratorRiscv64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
  MOZ_CRASH("We don't use runtime div for this architecture");
}
    400 
// riscv64 has a hardware 64-bit remainder, so the runtime-call path is unused.
void LIRGeneratorRiscv64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
  MOZ_CRASH("We don't use runtime mod for this architecture");
}
    404 
    405 void LIRGeneratorRiscv64::lowerBigIntPtrLsh(MBigIntPtrLsh* ins) {
    406  auto* lir = new (alloc()) LBigIntPtrLsh(
    407      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
    408  assignSnapshot(lir, ins->bailoutKind());
    409  define(lir, ins);
    410 }
    411 
    412 void LIRGeneratorRiscv64::lowerBigIntPtrRsh(MBigIntPtrRsh* ins) {
    413  auto* lir = new (alloc()) LBigIntPtrRsh(
    414      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
    415  assignSnapshot(lir, ins->bailoutKind());
    416  define(lir, ins);
    417 }
    418 
    419 void LIRGeneratorRiscv64::lowerBigIntPtrDiv(MBigIntPtrDiv* ins) {
    420  auto* lir = new (alloc())
    421      LBigIntPtrDiv(useRegister(ins->lhs()), useRegister(ins->rhs()),
    422                    LDefinition::BogusTemp(), LDefinition::BogusTemp());
    423  assignSnapshot(lir, ins->bailoutKind());
    424  define(lir, ins);
    425 }
    426 
    427 void LIRGeneratorRiscv64::lowerBigIntPtrMod(MBigIntPtrMod* ins) {
    428  auto* lir = new (alloc())
    429      LBigIntPtrMod(useRegister(ins->lhs()), useRegister(ins->rhs()), temp(),
    430                    LDefinition::BogusTemp());
    431  if (ins->canBeDivideByZero()) {
    432    assignSnapshot(lir, ins->bailoutKind());
    433  }
    434  define(lir, ins);
    435 }
    436 
    437 void LIRGeneratorRiscv64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
    438  const LUse elements = useRegister(ins->elements());
    439  const LAllocation index =
    440      useRegisterOrIndexConstant(ins->index(), ins->storageType());
    441 
    442  auto* lir = new (alloc()) LAtomicLoad64(elements, index);
    443  defineInt64(lir, ins);
    444 }
    445 
    446 void LIRGeneratorRiscv64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
    447  LUse elements = useRegister(ins->elements());
    448  LAllocation index =
    449      useRegisterOrIndexConstant(ins->index(), ins->writeType());
    450  LInt64Allocation value = useInt64Register(ins->value());
    451 
    452  add(new (alloc()) LAtomicStore64(elements, index, value), ins);
    453 }
    454 
    455 void LIRGenerator::visitBox(MBox* box) {
    456  MDefinition* opd = box->getOperand(0);
    457 
    458  // If the operand is a constant, emit near its uses.
    459  if (opd->isConstant() && box->canEmitAtUses()) {
    460    emitAtUses(box);
    461    return;
    462  }
    463 
    464  if (opd->isConstant()) {
    465    define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
    466           LDefinition(LDefinition::BOX));
    467  } else {
    468    LBox* ins = new (alloc()) LBox(useRegisterAtStart(opd), opd->type());
    469    define(ins, box, LDefinition(LDefinition::BOX));
    470  }
    471 }
    472 
// Lowers value unboxing. Floating-point unboxes use a dedicated LIR node;
// fallible unboxes attach a bailout snapshot for the type-check failure.
void LIRGenerator::visitUnbox(MUnbox* unbox) {
  MDefinition* box = unbox->getOperand(0);
  MOZ_ASSERT(box->type() == MIRType::Value);

  LInstructionHelper<1, BOX_PIECES, 0>* lir;
  if (IsFloatingPointType(unbox->type())) {
    // Only Double unboxing is expected here; Float32 values are not stored
    // boxed.
    MOZ_ASSERT(unbox->type() == MIRType::Double);
    lir = new (alloc()) LUnboxFloatingPoint(useBoxAtStart(box));
  } else if (unbox->fallible()) {
    // If the unbox is fallible, load the Value in a register first to
    // avoid multiple loads.
    lir = new (alloc()) LUnbox(useRegisterAtStart(box));
  } else {
    lir = new (alloc()) LUnbox(useAtStart(box));
  }

  if (unbox->fallible()) {
    assignSnapshot(lir, unbox->bailoutKind());
  }

  define(lir, unbox);
}
    495 
    496 void LIRGenerator::visitCopySign(MCopySign* ins) {
    497  MDefinition* lhs = ins->lhs();
    498  MDefinition* rhs = ins->rhs();
    499 
    500  MOZ_ASSERT(IsFloatingPointType(lhs->type()));
    501  MOZ_ASSERT(lhs->type() == rhs->type());
    502  MOZ_ASSERT(lhs->type() == ins->type());
    503 
    504  LInstructionHelper<1, 2, 0>* lir;
    505  if (lhs->type() == MIRType::Double) {
    506    lir = new (alloc()) LCopySignD();
    507  } else {
    508    lir = new (alloc()) LCopySignF();
    509  }
    510 
    511  lowerForFPU(lir, ins, lhs, rhs);
    512 }
    513 
    514 void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
    515  defineInt64(
    516      new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
    517 }
    518 
    519 void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
    520  defineInt64(new (alloc())
    521                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
    522              ins);
    523 }
    524 
    525 void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
    526  MDefinition* opd = ins->input();
    527  MOZ_ASSERT(opd->type() == MIRType::Int64);
    528  MOZ_ASSERT(IsFloatingPointType(ins->type()));
    529 
    530  define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
    531 }
    532 
    533 void LIRGenerator::visitSubstr(MSubstr* ins) {
    534  LSubstr* lir = new (alloc())
    535      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
    536              useRegister(ins->length()), temp(), temp(), temp());
    537  define(lir, ins);
    538  assignSafepoint(lir, ins);
    539 }
    540 
// Lowers compareExchange on a typed-array element. BigInt (64-bit) elements
// use a dedicated Int64 LIR node; narrower elements use the generic node with
// target-specific temps.
void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(!Scalar::isFloatingType(ins->arrayType()));
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Allocation oldval = useInt64Register(ins->oldval());
    LInt64Allocation newval = useInt64Register(ins->newval());

    auto* lir = new (alloc())
        LCompareExchangeTypedArrayElement64(elements, index, oldval, newval);
    defineInt64(lir, ins);
    return;
  }

  const LAllocation oldval = useRegister(ins->oldval());
  const LAllocation newval = useRegister(ins->newval());

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  LDefinition outTemp = LDefinition::BogusTemp();
  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    outTemp = temp();
  }

  // Sub-word (8/16-bit) accesses need value/offset/mask temps — presumably
  // because codegen emulates them with word-sized atomics.
  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  LCompareExchangeTypedArrayElement* lir = new (alloc())
      LCompareExchangeTypedArrayElement(elements, index, oldval, newval,
                                        outTemp, valueTemp, offsetTemp,
                                        maskTemp);

  define(lir, ins);
}
    589 
// Lowers atomic exchange on a typed-array element. Structure parallels
// visitCompareExchangeTypedArrayElement: BigInt elements get an Int64 node,
// narrower elements the generic node with target-specific temps.
void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Allocation value = useInt64Register(ins->value());

    auto* lir = new (alloc())
        LAtomicExchangeTypedArrayElement64(elements, index, value);
    defineInt64(lir, ins);
    return;
  }

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);

  const LAllocation value = useRegister(ins->value());

  LDefinition outTemp = LDefinition::BogusTemp();
  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  // Uint32 results are materialized as doubles and need an output temp.
  if (ins->arrayType() == Scalar::Uint32) {
    MOZ_ASSERT(ins->type() == MIRType::Double);
    outTemp = temp();
  }

  // Sub-word accesses need value/offset/mask temps (see
  // visitCompareExchangeTypedArrayElement).
  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  LAtomicExchangeTypedArrayElement* lir =
      new (alloc()) LAtomicExchangeTypedArrayElement(
          elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);

  define(lir, ins);
}
    637 
// Lowers a read-modify-write atomic (add/sub/and/or/xor) on a typed-array
// element. Each width class splits further on whether the old value is used:
// effect-only forms are cheaper and are add()ed instead of define()d.
void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
  MOZ_ASSERT(!Scalar::isFloatingType(ins->arrayType()));
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Allocation value = useInt64Register(ins->value());
    LInt64Definition temp = tempInt64();

    // Case 1: the result of the operation is not used.

    if (ins->isForEffect()) {
      auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
          elements, index, value, temp);
      add(lir, ins);
      return;
    }

    // Case 2: the result of the operation is used.

    auto* lir = new (alloc())
        LAtomicTypedArrayElementBinop64(elements, index, value, temp);
    defineInt64(lir, ins);
    return;
  }

  const LAllocation value = useRegister(ins->value());

  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  // Sub-word accesses need value/offset/mask temps (see
  // visitCompareExchangeTypedArrayElement).
  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  if (ins->isForEffect()) {
    LAtomicTypedArrayElementBinopForEffect* lir =
        new (alloc()) LAtomicTypedArrayElementBinopForEffect(
            elements, index, value, valueTemp, offsetTemp, maskTemp);
    add(lir, ins);
    return;
  }

  // For a Uint32Array with a known double result we need a temp for
  // the intermediate output.

  LDefinition outTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    outTemp = temp();
  }

  LAtomicTypedArrayElementBinop* lir =
      new (alloc()) LAtomicTypedArrayElementBinop(
          elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
  define(lir, ins);
}
    704 
    705 void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
    706  MOZ_ASSERT(opd->type() == MIRType::Value);
    707 
    708  LReturn* ins = new (alloc()) LReturn(isGenerator);
    709  ins->setOperand(0, useFixed(opd, JSReturnReg));
    710  add(ins);
    711 }
    712 
// Lowers an asm.js heap load. The bounds-check limit operand is only
// allocated when a check is actually required.
void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
  MOZ_ASSERT_IF(ins->needsBoundsCheck(),
                boundsCheckLimit->type() == MIRType::Int32);

  LAllocation baseAlloc = useRegisterAtStart(base);

  LAllocation limitAlloc = ins->needsBoundsCheck()
                               ? useRegisterAtStart(boundsCheckLimit)
                               : LAllocation();

  // We have no memory-base value, meaning that HeapReg is to be used as the
  // memory base.  This follows from the definition of
  // FunctionCompiler::maybeLoadMemoryBase() in WasmIonCompile.cpp.
  MOZ_ASSERT(!ins->hasMemoryBase());
  auto* lir =
      new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation());
  define(lir, ins);
}
    735 
// Lowers an asm.js heap store; mirrors visitAsmJSLoadHeap with an extra
// value operand and no result definition.
void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
  MOZ_ASSERT_IF(ins->needsBoundsCheck(),
                boundsCheckLimit->type() == MIRType::Int32);

  LAllocation baseAlloc = useRegisterAtStart(base);

  LAllocation limitAlloc = ins->needsBoundsCheck()
                               ? useRegisterAtStart(boundsCheckLimit)
                               : LAllocation();

  // See comment in LIRGenerator::visitAsmJSLoadHeap just above.
  MOZ_ASSERT(!ins->hasMemoryBase());
  add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
                                    limitAlloc, LAllocation()),
      ins);
}
    756 
// Lowers a wasm memory load. Picks the Int64 or generic LIR node by result
// type; when the access carries an offset, a modifiable copy of the base
// pointer is reserved for codegen (presumably to fold the offset in).
void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
  // sign-extended on riscv64 platform and we should explicitly promote it
  // to 64-bit by zero-extension when use it as an index register in memory
  // accesses.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  // Without an explicit memory base, HeapReg serves as the base register.
  LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  LAllocation ptr = useRegisterAtStart(base);

  LDefinition ptrCopy = LDefinition::BogusTemp();
  if (ins->access().offset32()) {
    ptrCopy = tempCopy(base, 0);
  }

  if (ins->type() == MIRType::Int64) {
    auto* lir = new (alloc()) LWasmLoadI64(ptr, memoryBase, ptrCopy);
    defineInt64(lir, ins);
    return;
  }

  auto* lir = new (alloc()) LWasmLoad(ptr, memoryBase, ptrCopy);
  define(lir, ins);
}
    785 
    786 void LIRGenerator::visitWasmStore(MWasmStore* ins) {
    787  MDefinition* base = ins->base();
    788  // See comment in visitWasmLoad re the type of 'base'.
    789  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
    790 
    791  MDefinition* value = ins->value();
    792 
    793  LAllocation memoryBase =
    794      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
    795                           : LGeneralReg(HeapReg);
    796 
    797  LAllocation baseAlloc = useRegisterAtStart(base);
    798 
    799  LDefinition ptrCopy = LDefinition::BogusTemp();
    800  if (ins->access().offset32()) {
    801    ptrCopy = tempCopy(base, 0);
    802  }
    803 
    804  if (ins->access().type() == Scalar::Int64) {
    805    LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
    806    auto* lir =
    807        new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc, memoryBase, ptrCopy);
    808    add(lir, ins);
    809    return;
    810  }
    811 
    812  LAllocation valueAlloc = useRegisterAtStart(value);
    813  auto* lir =
    814      new (alloc()) LWasmStore(baseAlloc, valueAlloc, memoryBase, ptrCopy);
    815  add(lir, ins);
    816 }
    817 
    818 void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
    819  MDefinition* opd = ins->input();
    820  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
    821 
    822  defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
    823 }
    824 
    825 void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
    826  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    827  LWasmUint32ToDouble* lir =
    828      new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
    829  define(lir, ins);
    830 }
    831 
    832 void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
    833  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    834  LWasmUint32ToFloat32* lir =
    835      new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
    836  define(lir, ins);
    837 }
    838 
    839 void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
    840  MDefinition* base = ins->base();
    841  // See comment in visitWasmLoad re the type of 'base'.
    842  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
    843 
    844  LAllocation memoryBase =
    845      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
    846                           : LGeneralReg(HeapReg);
    847  if (ins->access().type() == Scalar::Int64) {
    848    auto* lir = new (alloc()) LWasmCompareExchangeI64(
    849        useRegister(base), useInt64Register(ins->oldValue()),
    850        useInt64Register(ins->newValue()), memoryBase);
    851    defineInt64(lir, ins);
    852    return;
    853  }
    854 
    855  LDefinition valueTemp = LDefinition::BogusTemp();
    856  LDefinition offsetTemp = LDefinition::BogusTemp();
    857  LDefinition maskTemp = LDefinition::BogusTemp();
    858 
    859  if (ins->access().byteSize() < 4) {
    860    valueTemp = temp();
    861    offsetTemp = temp();
    862    maskTemp = temp();
    863  }
    864 
    865  auto* lir = new (alloc())
    866      LWasmCompareExchangeHeap(useRegister(base), useRegister(ins->oldValue()),
    867                               useRegister(ins->newValue()), memoryBase,
    868                               valueTemp, offsetTemp, maskTemp);
    869 
    870  define(lir, ins);
    871 }
    872 
    873 void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
    874  MDefinition* base = ins->base();
    875  // See comment in visitWasmLoad re the type of 'base'.
    876  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
    877 
    878  LAllocation memoryBase =
    879      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
    880                           : LGeneralReg(HeapReg);
    881 
    882  if (ins->access().type() == Scalar::Int64) {
    883    auto* lir = new (alloc()) LWasmAtomicExchangeI64(
    884        useRegister(base), useInt64Register(ins->value()), memoryBase);
    885    defineInt64(lir, ins);
    886    return;
    887  }
    888 
    889  LDefinition valueTemp = LDefinition::BogusTemp();
    890  LDefinition offsetTemp = LDefinition::BogusTemp();
    891  LDefinition maskTemp = LDefinition::BogusTemp();
    892 
    893  if (ins->access().byteSize() < 4) {
    894    valueTemp = temp();
    895    offsetTemp = temp();
    896    maskTemp = temp();
    897  }
    898 
    899  auto* lir = new (alloc())
    900      LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()),
    901                              memoryBase, valueTemp, offsetTemp, maskTemp);
    902  define(lir, ins);
    903 }
    904 
    905 void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
    906  MDefinition* base = ins->base();
    907  // See comment in visitWasmLoad re the type of 'base'.
    908  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
    909  LAllocation memoryBase =
    910      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
    911                           : LGeneralReg(HeapReg);
    912 
    913  if (ins->access().type() == Scalar::Int64) {
    914    auto* lir = new (alloc())
    915        LWasmAtomicBinopI64(useRegister(base), useInt64Register(ins->value()),
    916                            memoryBase, tempInt64());
    917    defineInt64(lir, ins);
    918    return;
    919  }
    920 
    921  LDefinition valueTemp = LDefinition::BogusTemp();
    922  LDefinition offsetTemp = LDefinition::BogusTemp();
    923  LDefinition maskTemp = LDefinition::BogusTemp();
    924 
    925  if (ins->access().byteSize() < 4) {
    926    valueTemp = temp();
    927    offsetTemp = temp();
    928    maskTemp = temp();
    929  }
    930 
    931  if (!ins->hasUses()) {
    932    auto* lir = new (alloc()) LWasmAtomicBinopHeapForEffect(
    933        useRegister(base), useRegister(ins->value()), memoryBase, valueTemp,
    934        offsetTemp, maskTemp);
    935    add(lir, ins);
    936    return;
    937  }
    938 
    939  auto* lir = new (alloc())
    940      LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
    941                           memoryBase, valueTemp, offsetTemp, maskTemp);
    942 
    943  define(lir, ins);
    944 }
    945 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
 MOZ_CRASH("ternary SIMD NYI");
}
    949 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
 MOZ_CRASH("binary SIMD NYI");
}
    953 
#ifdef ENABLE_WASM_SIMD
// riscv64 implements no shuffle specialization of bitselect with a constant
// mask; always report "not specializable".
bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
   int8_t shuffle[16]) {
 return false;
}
#endif
    960 
// No constant-RHS specializations are implemented on riscv64 yet; there are
// probably many we would want here.
bool MWasmBinarySimd128::specializeForConstantRhs() {
 return false;
}
    965 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmBinarySimd128WithConstant(
   MWasmBinarySimd128WithConstant* ins) {
 MOZ_CRASH("binary SIMD with constant NYI");
}
    970 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
 MOZ_CRASH("shift SIMD NYI");
}
    974 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
 MOZ_CRASH("shuffle SIMD NYI");
}
    978 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
 MOZ_CRASH("replace-lane SIMD NYI");
}
    982 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
 MOZ_CRASH("scalar-to-SIMD NYI");
}
    986 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
 MOZ_CRASH("unary SIMD NYI");
}
    990 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
 MOZ_CRASH("reduce-SIMD NYI");
}
    994 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
 MOZ_CRASH("load-lane SIMD NYI");
}
    998 
// Wasm SIMD is not implemented on riscv64.
void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
 MOZ_CRASH("store-lane SIMD NYI");
}