tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Lowering-loong64.cpp (33637B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/loong64/Lowering-loong64.h"
      8 
      9 #include "mozilla/MathAlgorithms.h"
     10 
     11 #include "jit/loong64/Assembler-loong64.h"
     12 #include "jit/Lowering.h"
     13 #include "jit/MIR-wasm.h"
     14 #include "jit/MIR.h"
     15 #include "jit/shared/Lowering-shared-inl.h"
     16 
     17 using namespace js;
     18 using namespace js::jit;
     19 
     20 using mozilla::FloorLog2;
     21 
     22 LTableSwitch* LIRGeneratorLOONG64::newLTableSwitch(
     23    const LAllocation& in, const LDefinition& inputCopy) {
     24  return new (alloc()) LTableSwitch(in, inputCopy, temp());
     25 }
     26 
     27 LTableSwitchV* LIRGeneratorLOONG64::newLTableSwitchV(const LBoxAllocation& in) {
     28  return new (alloc()) LTableSwitchV(in, temp(), tempDouble(), temp());
     29 }
     30 
// On LOONG64 a 32-bit shift takes the same operand shapes (register lhs,
// register-or-constant rhs) as a binary ALU op, so defer to lowerForALU.
void LIRGeneratorLOONG64::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
                                        MDefinition* mir, MDefinition* lhs,
                                        MDefinition* rhs) {
  lowerForALU(ins, mir, lhs, rhs);
}
     36 
// Lower a 64-bit shift or rotate. LShiftI64 and LRotateI64 expose their
// operands through differently-named setters, so dispatch at compile time
// with if constexpr; both take a register lhs and a register-or-constant
// shift count, and both can use at-start allocations.
template <class LInstr>
void LIRGeneratorLOONG64::lowerForShiftInt64(LInstr* ins, MDefinition* mir,
                                             MDefinition* lhs,
                                             MDefinition* rhs) {
  if constexpr (std::is_same_v<LInstr, LShiftI64>) {
    ins->setLhs(useInt64RegisterAtStart(lhs));
    ins->setRhs(useRegisterOrConstantAtStart(rhs));
  } else {
    ins->setInput(useInt64RegisterAtStart(lhs));
    ins->setCount(useRegisterOrConstantAtStart(rhs));
  }
  defineInt64(ins, mir);
}

// Explicit instantiations for the two instruction kinds used above.
template void LIRGeneratorLOONG64::lowerForShiftInt64(LShiftI64* ins,
                                                      MDefinition* mir,
                                                      MDefinition* lhs,
                                                      MDefinition* rhs);
template void LIRGeneratorLOONG64::lowerForShiftInt64(LRotateI64* ins,
                                                      MDefinition* mir,
                                                      MDefinition* lhs,
                                                      MDefinition* rhs);
     59 
// x = !y
// Lower a unary ALU operation: single register input, register output.
void LIRGeneratorLOONG64::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
                                      MDefinition* mir, MDefinition* input) {
  // Unary ALU operations don't read the input after writing to the output, even
  // for fallible operations, so we can use at-start allocations.
  ins->setOperand(0, useRegisterAtStart(input));
  define(ins, mir);
}
     68 
// z = x + y
// Lower a binary ALU operation: register lhs, register-or-constant rhs.
void LIRGeneratorLOONG64::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
                                      MDefinition* mir, MDefinition* lhs,
                                      MDefinition* rhs) {
  // Binary ALU operations don't read any input after writing to the output,
  // even for fallible operations, so we can use at-start allocations.
  ins->setOperand(0, useRegisterAtStart(lhs));
  ins->setOperand(1, useRegisterOrConstantAtStart(rhs));
  define(ins, mir);
}
     79 
// Lower a unary 64-bit ALU operation; input and output are Int64 registers.
void LIRGeneratorLOONG64::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
    MDefinition* input) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
  defineInt64(ins, mir);
}
     86 
// Lower a binary 64-bit ALU operation: Int64 register lhs and an Int64
// register-or-constant rhs placed at operand index INT64_PIECES.
void LIRGeneratorLOONG64::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64RegisterOrConstantAtStart(rhs));
  defineInt64(ins, mir);
}
     94 
// 64-bit multiply needs no special operand constraints on LOONG64; it is
// lowered exactly like any other binary Int64 ALU operation.
void LIRGeneratorLOONG64::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                           MDefinition* lhs, MDefinition* rhs) {
  lowerForALUInt64(ins, mir, lhs, rhs);
}
     99 
// Lower a unary floating-point operation: register input, register output.
void LIRGeneratorLOONG64::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
                                      MDefinition* mir, MDefinition* input) {
  ins->setOperand(0, useRegisterAtStart(input));
  define(ins, mir);
}
    105 
// Lower a binary floating-point operation; both operands must be registers
// (FP instructions take no immediate operands here).
void LIRGeneratorLOONG64::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
                                      MDefinition* mir, MDefinition* lhs,
                                      MDefinition* rhs) {
  ins->setOperand(0, useRegisterAtStart(lhs));
  ins->setOperand(1, useRegisterAtStart(rhs));
  define(ins, mir);
}
    113 
// Pin a boxed Value to a fixed register. Only |reg1| is used: on this 64-bit
// platform a Value fits in a single register, so |reg2| is ignored (it exists
// for signature compatibility with 32-bit platforms).
LBoxAllocation LIRGeneratorLOONG64::useBoxFixed(MDefinition* mir, Register reg1,
                                                Register reg2,
                                                bool useAtStart) {
  MOZ_ASSERT(mir->type() == MIRType::Value);

  ensureDefined(mir);
  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
}
    122 
// All GPRs can perform byte operations on LOONG64, so a byte-op register is
// just an ordinary register use (unlike e.g. x86, which restricts byte ops).
LAllocation LIRGeneratorLOONG64::useByteOpRegister(MDefinition* mir) {
  return useRegister(mir);
}
    126 
// At-start variant of useByteOpRegister; no byte-register restriction here.
LAllocation LIRGeneratorLOONG64::useByteOpRegisterAtStart(MDefinition* mir) {
  return useRegisterAtStart(mir);
}
    130 
// Register-or-constant variant of useByteOpRegister; again no restriction.
LAllocation LIRGeneratorLOONG64::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useRegisterOrNonDoubleConstant(mir);
}
    135 
// Any temp register works for byte operations on this platform.
LDefinition LIRGeneratorLOONG64::tempByteOpRegister() { return temp(); }
    137 
// Unboxing needs a plain scratch GPR on this platform.
LDefinition LIRGeneratorLOONG64::tempToUnbox() { return temp(); }
    139 
// Value-typed phis need no special handling on 64-bit (a Value is one
// register), so reuse the typed-phi path.
void LIRGeneratorLOONG64::lowerUntypedPhiInput(MPhi* phi,
                                               uint32_t inputPosition,
                                               LBlock* block, size_t lirIndex) {
  lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}
// Int64 phis likewise occupy a single register on 64-bit; reuse typed path.
void LIRGeneratorLOONG64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                             LBlock* block, size_t lirIndex) {
  lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}
// Define an Int64 phi; one register piece on 64-bit, so the typed helper fits.
void LIRGeneratorLOONG64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  defineTypedPhi(phi, lirIndex);
}
    152 
// Lower a 32-bit integer multiply. Attaches a bailout snapshot when the
// multiply is fallible (e.g. overflow or negative-zero checks).
void LIRGeneratorLOONG64::lowerMulI(MMul* mul, MDefinition* lhs,
                                    MDefinition* rhs) {
  LMulI* lir = new (alloc()) LMulI;
  if (mul->fallible()) {
    assignSnapshot(lir, mul->bailoutKind());
  }

  // Negative zero check reads |lhs| and |rhs| after writing to the output, so
  // we can't use at-start allocations.
  if (mul->canBeNegativeZero() && !rhs->isConstant()) {
    lir->setOperand(0, useRegister(lhs));
    lir->setOperand(1, useRegister(rhs));
    define(lir, mul);
    return;
  }

  // Otherwise the generic ALU lowering (at-start allocations) is safe.
  lowerForALU(lir, mul, lhs, rhs);
}
    171 
// Lower a signed 32-bit division, specializing division by a positive
// power-of-two constant into a shift-based LIR node.
void LIRGeneratorLOONG64::lowerDivI(MDiv* div) {
  // Division instructions are slow. Division by constant denominators can be
  // rewritten to use other instructions.
  if (div->rhs()->isConstant()) {
    int32_t rhs = div->rhs()->toConstant()->toInt32();
    // Check for division by a positive power of two, which is an easy and
    // important case to optimize. Note that other optimizations are also
    // possible; division by negative powers of two can be optimized in a
    // similar manner as positive powers of two, and division by other
    // constants can be optimized by a reciprocal multiplication technique.
    int32_t shift = FloorLog2(rhs);
    // |rhs > 0| guards against rhs <= 0 (FloorLog2 result is meaningless
    // there); the equality test confirms rhs is exactly 2^shift.
    if (rhs > 0 && 1 << shift == rhs) {
      LDivPowTwoI* lir =
          new (alloc()) LDivPowTwoI(useRegister(div->lhs()), temp(), shift);
      if (div->fallible()) {
        assignSnapshot(lir, div->bailoutKind());
      }
      define(lir, div);
      return;
    }
  }

  // General case: a real divide instruction with a scratch temp.
  LDivI* lir = new (alloc())
      LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
  if (div->fallible()) {
    assignSnapshot(lir, div->bailoutKind());
  }
  define(lir, div);
}
    201 
    202 void LIRGeneratorLOONG64::lowerDivI64(MDiv* div) {
    203  auto* lir = new (alloc())
    204      LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()));
    205  defineInt64(lir, div);
    206 }
    207 
// Lower a signed 32-bit modulus. Two constant-rhs specializations exist:
// rhs == 2^k uses a power-of-two node, and rhs == 2^k - 1 uses a mask node.
void LIRGeneratorLOONG64::lowerModI(MMod* mod) {
  if (mod->rhs()->isConstant()) {
    int32_t rhs = mod->rhs()->toConstant()->toInt32();
    int32_t shift = FloorLog2(rhs);
    // rhs is exactly 2^shift (the rhs > 0 guard excludes non-positive rhs).
    if (rhs > 0 && 1 << shift == rhs) {
      LModPowTwoI* lir =
          new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
      if (mod->fallible()) {
        assignSnapshot(lir, mod->bailoutKind());
      }
      define(lir, mod);
      return;
    } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
      // rhs is 2^(shift+1) - 1, i.e. a contiguous low-bit mask.
      LModMaskI* lir = new (alloc())
          LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift + 1);
      if (mod->fallible()) {
        assignSnapshot(lir, mod->bailoutKind());
      }
      define(lir, mod);
      return;
    }
  }
  // General case: a real remainder instruction.
  auto* lir =
      new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()));

  if (mod->fallible()) {
    assignSnapshot(lir, mod->bailoutKind());
  }
  define(lir, mod);
}
    238 
    239 void LIRGeneratorLOONG64::lowerModI64(MMod* mod) {
    240  auto* lir = new (alloc())
    241      LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()));
    242  defineInt64(lir, mod);
    243 }
    244 
    245 void LIRGeneratorLOONG64::lowerUDiv(MDiv* div) {
    246  MDefinition* lhs = div->getOperand(0);
    247  MDefinition* rhs = div->getOperand(1);
    248 
    249  LUDivOrMod* lir = new (alloc()) LUDivOrMod;
    250  lir->setOperand(0, useRegister(lhs));
    251  lir->setOperand(1, useRegister(rhs));
    252  if (div->fallible()) {
    253    assignSnapshot(lir, div->bailoutKind());
    254  }
    255 
    256  define(lir, div);
    257 }
    258 
    259 void LIRGeneratorLOONG64::lowerUDivI64(MDiv* div) {
    260  auto* lir = new (alloc())
    261      LUDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()));
    262  defineInt64(lir, div);
    263 }
    264 
    265 void LIRGeneratorLOONG64::lowerUMod(MMod* mod) {
    266  MDefinition* lhs = mod->getOperand(0);
    267  MDefinition* rhs = mod->getOperand(1);
    268 
    269  LUDivOrMod* lir = new (alloc()) LUDivOrMod;
    270  lir->setOperand(0, useRegister(lhs));
    271  lir->setOperand(1, useRegister(rhs));
    272  if (mod->fallible()) {
    273    assignSnapshot(lir, mod->bailoutKind());
    274  }
    275 
    276  define(lir, mod);
    277 }
    278 
    279 void LIRGeneratorLOONG64::lowerUModI64(MMod* mod) {
    280  auto* lir = new (alloc())
    281      LUDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()));
    282  defineInt64(lir, mod);
    283 }
    284 
// Lower an unsigned right shift producing a double result (x >>> y used as
// a number). Both inputs are Int32; a temp is needed for the conversion.
void LIRGeneratorLOONG64::lowerUrshD(MUrsh* mir) {
  MDefinition* lhs = mir->lhs();
  MDefinition* rhs = mir->rhs();

  MOZ_ASSERT(lhs->type() == MIRType::Int32);
  MOZ_ASSERT(rhs->type() == MIRType::Int32);

  auto* lir = new (alloc()) LUrshD(useRegisterAtStart(lhs),
                                   useRegisterOrConstantAtStart(rhs), temp());
  define(lir, mir);
}
    296 
    297 void LIRGeneratorLOONG64::lowerPowOfTwoI(MPow* mir) {
    298  int32_t base = mir->input()->toConstant()->toInt32();
    299  MDefinition* power = mir->power();
    300 
    301  auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
    302  assignSnapshot(lir, mir->bailoutKind());
    303  define(lir, mir);
    304 }
    305 
// Lower a BigInt pointer-sized left shift; two temps, always has a snapshot
// (the shift can bail out, e.g. on overflow).
void LIRGeneratorLOONG64::lowerBigIntPtrLsh(MBigIntPtrLsh* ins) {
  auto* lir = new (alloc()) LBigIntPtrLsh(
      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
  assignSnapshot(lir, ins->bailoutKind());
  define(lir, ins);
}
    312 
// Lower a BigInt pointer-sized right shift; mirrors lowerBigIntPtrLsh.
void LIRGeneratorLOONG64::lowerBigIntPtrRsh(MBigIntPtrRsh* ins) {
  auto* lir = new (alloc()) LBigIntPtrRsh(
      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
  assignSnapshot(lir, ins->bailoutKind());
  define(lir, ins);
}
    319 
// Lower a BigInt pointer-sized division. No temps are needed on this
// platform (both temp slots are bogus); a snapshot is always attached.
void LIRGeneratorLOONG64::lowerBigIntPtrDiv(MBigIntPtrDiv* ins) {
  auto* lir = new (alloc())
      LBigIntPtrDiv(useRegister(ins->lhs()), useRegister(ins->rhs()),
                    LDefinition::BogusTemp(), LDefinition::BogusTemp());
  assignSnapshot(lir, ins->bailoutKind());
  define(lir, ins);
}
    327 
// Lower a BigInt pointer-sized modulus. One temp is required; a snapshot is
// only needed when the divisor can be zero.
void LIRGeneratorLOONG64::lowerBigIntPtrMod(MBigIntPtrMod* ins) {
  auto* lir = new (alloc())
      LBigIntPtrMod(useRegister(ins->lhs()), useRegister(ins->rhs()), temp(),
                    LDefinition::BogusTemp());
  if (ins->canBeDivideByZero()) {
    assignSnapshot(lir, ins->bailoutKind());
  }
  define(lir, ins);
}
    337 
    338 void LIRGeneratorLOONG64::lowerTruncateDToInt32(MTruncateToInt32* ins) {
    339  MDefinition* opd = ins->input();
    340  MOZ_ASSERT(opd->type() == MIRType::Double);
    341 
    342  define(new (alloc()) LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
    343 }
    344 
    345 void LIRGeneratorLOONG64::lowerTruncateFToInt32(MTruncateToInt32* ins) {
    346  MDefinition* opd = ins->input();
    347  MOZ_ASSERT(opd->type() == MIRType::Float32);
    348 
    349  define(new (alloc()) LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);
    350 }
    351 
// LOONG64 converts Int64 to floating point inline (see
// visitInt64ToFloatingPoint), so the builtin-call path is never taken.
void LIRGeneratorLOONG64::lowerBuiltinInt64ToFloatingPoint(
    MBuiltinInt64ToFloatingPoint* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}
    356 
// Lower a 32-bit wasm select. The output reuses the trueExpr register
// (at-start), so the false branch only has to conditionally overwrite it.
void LIRGeneratorLOONG64::lowerWasmSelectI(MWasmSelect* select) {
  auto* lir = new (alloc())
      LWasmSelect(useRegisterAtStart(select->trueExpr()),
                  useAny(select->falseExpr()), useRegister(select->condExpr()));
  defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
}
    363 
// Lower a 64-bit wasm select; same reuse-input scheme as lowerWasmSelectI.
void LIRGeneratorLOONG64::lowerWasmSelectI64(MWasmSelect* select) {
  auto* lir = new (alloc()) LWasmSelectI64(
      useInt64RegisterAtStart(select->trueExpr()),
      useInt64(select->falseExpr()), useRegister(select->condExpr()));
  defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
}
    370 
    371 // On loong64 we specialize the only cases where compare is {U,}Int32 and select
    372 // is {U,}Int32.
    373 bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
    374    MCompare::CompareType compTy, MIRType insTy) {
    375  return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
    376                                     compTy == MCompare::Compare_UInt32);
    377 }
    378 
// Lower a fused wasm compare+select. The output reuses the trueExpr register
// (at-start); the compare operands and falseExpr must stay live across the
// write, so they get plain register uses.
void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
                                                   MDefinition* lhs,
                                                   MDefinition* rhs,
                                                   MCompare::CompareType compTy,
                                                   JSOp jsop) {
  MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
  auto* lir = new (alloc()) LWasmCompareAndSelect(
      useRegister(lhs), useRegister(rhs), useRegisterAtStart(ins->trueExpr()),
      useRegister(ins->falseExpr()), compTy, jsop);
  defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
}
    390 
// Lower a wasm builtin-call truncation to Int32, choosing the double or
// float32 LIR node by input type. The instance pointer is pinned to
// InstanceReg for the (potential) builtin call; no temp is needed.
void LIRGeneratorLOONG64::lowerWasmBuiltinTruncateToInt32(
    MWasmBuiltinTruncateToInt32* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  if (opd->type() == MIRType::Double) {
    define(new (alloc()) LWasmBuiltinTruncateDToInt32(
               useRegister(opd), useFixed(ins->instance(), InstanceReg),
               LDefinition::BogusTemp()),
           ins);
    return;
  }

  define(new (alloc()) LWasmBuiltinTruncateFToInt32(
             useRegister(opd), useFixed(ins->instance(), InstanceReg),
             LDefinition::BogusTemp()),
         ins);
}
    409 
// LOONG64 truncates to Int64 inline (see visitWasmTruncateToInt64), so the
// builtin-call path is never taken.
void LIRGeneratorLOONG64::lowerWasmBuiltinTruncateToInt64(
    MWasmBuiltinTruncateToInt64* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}
    414 
// 64-bit division is done with hardware instructions here (lowerDivI64),
// never via a runtime call.
void LIRGeneratorLOONG64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
  MOZ_CRASH("We don't use runtime div for this architecture");
}
    418 
// 64-bit modulus is done with hardware instructions here (lowerModI64),
// never via a runtime call.
void LIRGeneratorLOONG64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
  MOZ_CRASH("We don't use runtime mod for this architecture");
}
    422 
// Lower a 64-bit atomic load from a typed array; the index may fold into a
// constant offset when the scalar type allows it.
void LIRGeneratorLOONG64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->storageType());

  auto* lir = new (alloc()) LAtomicLoad64(elements, index);
  defineInt64(lir, ins);
}
    431 
// Lower a 64-bit atomic store to a typed array. The store produces no value,
// so the instruction is added without a definition.
void LIRGeneratorLOONG64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
  LUse elements = useRegister(ins->elements());
  LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->writeType());
  LInt64Allocation value = useInt64Register(ins->value());

  add(new (alloc()) LAtomicStore64(elements, index, value), ins);
}
    440 
// Lower boxing of a raw value into a JS Value. Constants may be rematerialized
// at their uses; otherwise a constant becomes an LValue and a non-constant is
// boxed in place from its register.
void LIRGenerator::visitBox(MBox* box) {
  MDefinition* opd = box->getOperand(0);

  // If the operand is a constant, emit near its uses.
  if (opd->isConstant() && box->canEmitAtUses()) {
    emitAtUses(box);
    return;
  }

  if (opd->isConstant()) {
    define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
           LDefinition(LDefinition::BOX));
  } else {
    LBox* ins = new (alloc()) LBox(useRegisterAtStart(opd), opd->type());
    define(ins, box, LDefinition(LDefinition::BOX));
  }
}
    458 
// Lower unboxing of a JS Value. Floating-point results use a dedicated node
// (only Double occurs here); fallible unboxes force the Value into a register
// so the type check and payload extraction share a single load.
void LIRGenerator::visitUnbox(MUnbox* unbox) {
  MDefinition* box = unbox->getOperand(0);
  MOZ_ASSERT(box->type() == MIRType::Value);

  LInstructionHelper<1, BOX_PIECES, 0>* lir;
  if (IsFloatingPointType(unbox->type())) {
    MOZ_ASSERT(unbox->type() == MIRType::Double);
    lir = new (alloc()) LUnboxFloatingPoint(useBoxAtStart(box));
  } else if (unbox->fallible()) {
    // If the unbox is fallible, load the Value in a register first to
    // avoid multiple loads.
    lir = new (alloc()) LUnbox(useRegisterAtStart(box));
  } else {
    lir = new (alloc()) LUnbox(useAtStart(box));
  }

  if (unbox->fallible()) {
    assignSnapshot(lir, unbox->bailoutKind());
  }

  define(lir, unbox);
}
    481 
// Lower copysign(lhs, rhs); pick the double or float32 LIR node by type, then
// lower like any other binary FP operation.
void LIRGenerator::visitCopySign(MCopySign* ins) {
  MDefinition* lhs = ins->lhs();
  MDefinition* rhs = ins->rhs();

  MOZ_ASSERT(IsFloatingPointType(lhs->type()));
  MOZ_ASSERT(lhs->type() == rhs->type());
  MOZ_ASSERT(lhs->type() == ins->type());

  LInstructionHelper<1, 2, 0>* lir;
  if (lhs->type() == MIRType::Double) {
    lir = new (alloc()) LCopySignD();
  } else {
    lir = new (alloc()) LCopySignF();
  }

  lowerForFPU(lir, ins, lhs, rhs);
}
    499 
    500 void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
    501  defineInt64(
    502      new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
    503 }
    504 
    505 void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
    506  defineInt64(new (alloc())
    507                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
    508              ins);
    509 }
    510 
    511 void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
    512  MDefinition* opd = ins->input();
    513  MOZ_ASSERT(opd->type() == MIRType::Int64);
    514  MOZ_ASSERT(IsFloatingPointType(ins->type()));
    515 
    516  define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
    517 }
    518 
// Lower String.prototype.substring-style extraction. Three temps are needed;
// the operation can allocate a new string, hence the safepoint.
void LIRGenerator::visitSubstr(MSubstr* ins) {
  LSubstr* lir = new (alloc())
      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
              useRegister(ins->length()), temp(), temp(), temp());
  define(lir, ins);
  assignSafepoint(lir, ins);
}
    526 
// Lower compareExchange on a typed-array element. BigInt arrays use a
// dedicated 64-bit node; narrow (sub-word) element types need three temps for
// the shift/mask emulation of a sub-word CAS, and a Uint32 element read as a
// double needs an extra output temp.
void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(!Scalar::isFloatingType(ins->arrayType()));
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Allocation oldval = useInt64Register(ins->oldval());
    LInt64Allocation newval = useInt64Register(ins->newval());

    auto* lir = new (alloc())
        LCompareExchangeTypedArrayElement64(elements, index, oldval, newval);
    defineInt64(lir, ins);
    return;
  }

  const LAllocation oldval = useRegister(ins->oldval());
  const LAllocation newval = useRegister(ins->newval());

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  LDefinition outTemp = LDefinition::BogusTemp();
  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    outTemp = temp();
  }

  // Sub-word (8- and 16-bit) accesses are emulated on top of word-sized
  // atomics and need value/offset/mask temps.
  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  LCompareExchangeTypedArrayElement* lir = new (alloc())
      LCompareExchangeTypedArrayElement(elements, index, oldval, newval,
                                        outTemp, valueTemp, offsetTemp,
                                        maskTemp);

  define(lir, ins);
}
    575 
// Lower atomic exchange on a typed-array element. Structure parallels
// visitCompareExchangeTypedArrayElement: BigInt arrays get a 64-bit node,
// sub-word accesses need shift/mask temps, and Uint32-as-double needs an
// output temp.
void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Allocation value = useInt64Register(ins->value());

    auto* lir = new (alloc())
        LAtomicExchangeTypedArrayElement64(elements, index, value);
    defineInt64(lir, ins);
    return;
  }

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);

  const LAllocation value = useRegister(ins->value());

  LDefinition outTemp = LDefinition::BogusTemp();
  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32) {
    MOZ_ASSERT(ins->type() == MIRType::Double);
    outTemp = temp();
  }

  // Sub-word accesses are emulated with word-sized atomics plus shift/mask.
  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  LAtomicExchangeTypedArrayElement* lir =
      new (alloc()) LAtomicExchangeTypedArrayElement(
          elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);

  define(lir, ins);
}
    623 
// Lower a read-modify-write atomic (add/sub/and/or/xor) on a typed-array
// element. Splits on BigInt vs. plain integer arrays, and within each on
// whether the old value is used ("for effect" emits a cheaper node).
void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
  MOZ_ASSERT(!Scalar::isFloatingType(ins->arrayType()));
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Allocation value = useInt64Register(ins->value());
    LInt64Definition temp = tempInt64();

    // Case 1: the result of the operation is not used.

    if (ins->isForEffect()) {
      auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
          elements, index, value, temp);
      add(lir, ins);
      return;
    }

    // Case 2: the result of the operation is used.

    auto* lir = new (alloc())
        LAtomicTypedArrayElementBinop64(elements, index, value, temp);
    defineInt64(lir, ins);
    return;
  }

  LAllocation value = useRegister(ins->value());
  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  // Sub-word accesses are emulated with word-sized atomics plus shift/mask.
  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  if (ins->isForEffect()) {
    LAtomicTypedArrayElementBinopForEffect* lir =
        new (alloc()) LAtomicTypedArrayElementBinopForEffect(
            elements, index, value, valueTemp, offsetTemp, maskTemp);
    add(lir, ins);
    return;
  }

  // For a Uint32Array with a known double result we need a temp for
  // the intermediate output.

  LDefinition outTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    outTemp = temp();
  }

  LAtomicTypedArrayElementBinop* lir =
      new (alloc()) LAtomicTypedArrayElementBinop(
          elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
  define(lir, ins);
}
    689 
// Lower a function return: the boxed return value is pinned to JSReturnReg
// per the JS calling convention.
void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
  MOZ_ASSERT(opd->type() == MIRType::Value);

  LReturn* ins = new (alloc()) LReturn(isGenerator);
  ins->setOperand(0, useFixed(opd, JSReturnReg));
  add(ins);
}
    697 
// Lower an asm.js heap load. The bounds-check limit operand is only
// materialized when a bounds check is required; the heap base is implicitly
// HeapReg (no explicit memory-base operand).
void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
  MOZ_ASSERT_IF(ins->needsBoundsCheck(),
                boundsCheckLimit->type() == MIRType::Int32);

  LAllocation baseAlloc = useRegisterAtStart(base);

  LAllocation limitAlloc = ins->needsBoundsCheck()
                               ? useRegisterAtStart(boundsCheckLimit)
                               : LAllocation();

  // We have no memory-base value, meaning that HeapReg is to be used as the
  // memory base.  This follows from the definition of
  // FunctionCompiler::maybeLoadMemoryBase() in WasmIonCompile.cpp.
  MOZ_ASSERT(!ins->hasMemoryBase());
  auto* lir =
      new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation());
  define(lir, ins);
}
    720 
// Lower an asm.js heap store; operand handling mirrors visitAsmJSLoadHeap.
void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
  MOZ_ASSERT_IF(ins->needsBoundsCheck(),
                boundsCheckLimit->type() == MIRType::Int32);

  LAllocation baseAlloc = useRegisterAtStart(base);

  LAllocation limitAlloc = ins->needsBoundsCheck()
                               ? useRegisterAtStart(boundsCheckLimit)
                               : LAllocation();

  // See comment in LIRGenerator::visitAsmJSLoadHeap just above re the
  // absent memory base (HeapReg is implicit).
  MOZ_ASSERT(!ins->hasMemoryBase());
  add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
                                    limitAlloc, LAllocation()),
      ins);
}
    741 
// Lower a wasm memory load. Picks LWasmLoadI64 vs LWasmLoad by result type;
// when the access has a non-zero offset a copy of the pointer is reserved so
// the offset can be folded in without clobbering the original.
void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
  // sign-extended on loongarch64 platform and we should explicitly promote it
  // to 64-bit by zero-extension when use it as an index register in memory
  // accesses.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  // Explicit memory base if present; otherwise HeapReg is the implicit base.
  LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  LAllocation ptr = useRegisterAtStart(base);

  LDefinition ptrCopy = LDefinition::BogusTemp();
  if (ins->access().offset32()) {
    ptrCopy = tempCopy(base, 0);
  }

  if (ins->type() == MIRType::Int64) {
    auto* lir = new (alloc()) LWasmLoadI64(ptr, memoryBase, ptrCopy);
    defineInt64(lir, ins);
    return;
  }

  auto* lir = new (alloc()) LWasmLoad(ptr, memoryBase, ptrCopy);
  define(lir, ins);
}
    770 
    771 void LIRGenerator::visitWasmStore(MWasmStore* ins) {
    772  MDefinition* base = ins->base();
    773  // See comment in visitWasmLoad re the type of 'base'.
    774  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
    775 
    776  MDefinition* value = ins->value();
    777  LAllocation memoryBase =
    778      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
    779                           : LGeneralReg(HeapReg);
    780 
    781  LAllocation baseAlloc = useRegisterAtStart(base);
    782 
    783  LDefinition ptrCopy = LDefinition::BogusTemp();
    784  if (ins->access().offset32()) {
    785    ptrCopy = tempCopy(base, 0);
    786  }
    787 
    788  if (ins->access().type() == Scalar::Int64) {
    789    LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
    790    auto* lir =
    791        new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc, memoryBase, ptrCopy);
    792    add(lir, ins);
    793    return;
    794  }
    795 
    796  LAllocation valueAlloc = useRegisterAtStart(value);
    797  auto* lir =
    798      new (alloc()) LWasmStore(baseAlloc, valueAlloc, memoryBase, ptrCopy);
    799  add(lir, ins);
    800 }
    801 
    802 void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
    803  MDefinition* opd = ins->input();
    804  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
    805 
    806  defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
    807 }
    808 
    809 void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
    810  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    811  LWasmUint32ToDouble* lir =
    812      new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
    813  define(lir, ins);
    814 }
    815 
    816 void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
    817  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    818  LWasmUint32ToFloat32* lir =
    819      new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
    820  define(lir, ins);
    821 }
    822 
    823 void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
    824  MDefinition* base = ins->base();
    825  // See comment in visitWasmLoad re the type of 'base'.
    826  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
    827  LAllocation memoryBase = ins->hasMemoryBase()
    828                               ? LAllocation(useRegister(ins->memoryBase()))
    829                               : LGeneralReg(HeapReg);
    830 
    831  if (ins->access().type() == Scalar::Int64) {
    832    auto* lir = new (alloc()) LWasmCompareExchangeI64(
    833        useRegister(base), useInt64Register(ins->oldValue()),
    834        useInt64Register(ins->newValue()), memoryBase);
    835    defineInt64(lir, ins);
    836    return;
    837  }
    838 
    839  LDefinition valueTemp = LDefinition::BogusTemp();
    840  LDefinition offsetTemp = LDefinition::BogusTemp();
    841  LDefinition maskTemp = LDefinition::BogusTemp();
    842 
    843  if (ins->access().byteSize() < 4) {
    844    valueTemp = temp();
    845    offsetTemp = temp();
    846    maskTemp = temp();
    847  }
    848 
    849  auto* lir = new (alloc())
    850      LWasmCompareExchangeHeap(useRegister(base), useRegister(ins->oldValue()),
    851                               useRegister(ins->newValue()), memoryBase,
    852                               valueTemp, offsetTemp, maskTemp);
    853 
    854  define(lir, ins);
    855 }
    856 
    857 void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
    858  MDefinition* base = ins->base();
    859  // See comment in visitWasmLoad re the type of 'base'.
    860  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
    861  LAllocation memoryBase = ins->hasMemoryBase()
    862                               ? LAllocation(useRegister(ins->memoryBase()))
    863                               : LGeneralReg(HeapReg);
    864 
    865  if (ins->access().type() == Scalar::Int64) {
    866    auto* lir = new (alloc()) LWasmAtomicExchangeI64(
    867        useRegister(base), useInt64Register(ins->value()), memoryBase);
    868    defineInt64(lir, ins);
    869    return;
    870  }
    871 
    872  LDefinition valueTemp = LDefinition::BogusTemp();
    873  LDefinition offsetTemp = LDefinition::BogusTemp();
    874  LDefinition maskTemp = LDefinition::BogusTemp();
    875 
    876  if (ins->access().byteSize() < 4) {
    877    valueTemp = temp();
    878    offsetTemp = temp();
    879    maskTemp = temp();
    880  }
    881 
    882  auto* lir = new (alloc())
    883      LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()),
    884                              memoryBase, valueTemp, offsetTemp, maskTemp);
    885  define(lir, ins);
    886 }
    887 
    888 void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
    889  MDefinition* base = ins->base();
    890  // See comment in visitWasmLoad re the type of 'base'.
    891  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
    892  LAllocation memoryBase = ins->hasMemoryBase()
    893                               ? LAllocation(useRegister(ins->memoryBase()))
    894                               : LGeneralReg(HeapReg);
    895 
    896  if (ins->access().type() == Scalar::Int64) {
    897    auto* lir = new (alloc())
    898        LWasmAtomicBinopI64(useRegister(base), useInt64Register(ins->value()),
    899                            memoryBase, tempInt64());
    900    defineInt64(lir, ins);
    901    return;
    902  }
    903 
    904  LDefinition valueTemp = LDefinition::BogusTemp();
    905  LDefinition offsetTemp = LDefinition::BogusTemp();
    906  LDefinition maskTemp = LDefinition::BogusTemp();
    907 
    908  if (ins->access().byteSize() < 4) {
    909    valueTemp = temp();
    910    offsetTemp = temp();
    911    maskTemp = temp();
    912  }
    913 
    914  if (!ins->hasUses()) {
    915    LWasmAtomicBinopHeapForEffect* lir = new (alloc())
    916        LWasmAtomicBinopHeapForEffect(useRegister(base),
    917                                      useRegister(ins->value()), memoryBase,
    918                                      valueTemp, offsetTemp, maskTemp);
    919    add(lir, ins);
    920    return;
    921  }
    922 
    923  auto* lir = new (alloc())
    924      LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
    925                           memoryBase, valueTemp, offsetTemp, maskTemp);
    926 
    927  define(lir, ins);
    928 }
    929 
// Wasm SIMD is not yet implemented on loongarch64; this lowering must not be
// reached.
void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
  MOZ_CRASH("ternary SIMD NYI");
}
    933 
// Wasm SIMD is not yet implemented on loongarch64; this lowering must not be
// reached.
void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
  MOZ_CRASH("binary SIMD NYI");
}
    937 
#ifdef ENABLE_WASM_SIMD
// No constant-mask bitselect is specialized as a shuffle on loongarch64, so
// the out-param 'shuffle' is never written and this always reports false.
bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
    int8_t shuffle[16]) {
  return false;
}
#endif
    944 
// No binary SIMD operation with a constant rhs has a specialized lowering on
// loongarch64 yet, so always report false.
bool MWasmBinarySimd128::specializeForConstantRhs() {
  // Probably many we want to do here
  return false;
}
    949 
// Unreachable: specializeForConstantRhs() above always returns false on this
// platform, so this node is never created.
void LIRGenerator::visitWasmBinarySimd128WithConstant(
    MWasmBinarySimd128WithConstant* ins) {
  MOZ_CRASH("binary SIMD with constant NYI");
}
    954 
// Wasm SIMD is not yet implemented on loongarch64; this lowering must not be
// reached.
void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
  MOZ_CRASH("shift SIMD NYI");
}
    958 
// Wasm SIMD is not yet implemented on loongarch64; this lowering must not be
// reached.
void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
  MOZ_CRASH("shuffle SIMD NYI");
}
    962 
// Wasm SIMD is not yet implemented on loongarch64; this lowering must not be
// reached.
void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
  MOZ_CRASH("replace-lane SIMD NYI");
}
    966 
// Wasm SIMD is not yet implemented on loongarch64; this lowering must not be
// reached.
void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
  MOZ_CRASH("scalar-to-SIMD NYI");
}
    970 
// Wasm SIMD is not yet implemented on loongarch64; this lowering must not be
// reached.
void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
  MOZ_CRASH("unary SIMD NYI");
}
    974 
// Wasm SIMD is not yet implemented on loongarch64; this lowering must not be
// reached.
void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
  MOZ_CRASH("reduce-SIMD NYI");
}
    978 
// Wasm SIMD is not yet implemented on loongarch64; this lowering must not be
// reached.
void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
  MOZ_CRASH("load-lane SIMD NYI");
}
    982 
// Wasm SIMD is not yet implemented on loongarch64; this lowering must not be
// reached.
void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
  MOZ_CRASH("store-lane SIMD NYI");
}