tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Lowering-mips-shared.cpp (29156B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/mips-shared/Lowering-mips-shared.h"
      8 
      9 #include "mozilla/MathAlgorithms.h"
     10 
     11 #include "jit/Lowering.h"
     12 #include "jit/MIR-wasm.h"
     13 #include "jit/MIR.h"
     14 
     15 #include "jit/shared/Lowering-shared-inl.h"
     16 
     17 using namespace js;
     18 using namespace js::jit;
     19 
     20 using mozilla::FloorLog2;
     21 
// MIPS has no special byte-register class (unlike x86), so a "byte op"
// register is just any general-purpose register.
LAllocation LIRGeneratorMIPSShared::useByteOpRegister(MDefinition* mir) {
  return useRegister(mir);
}
     25 
// At-start variant of useByteOpRegister; any GPR qualifies on MIPS.
LAllocation LIRGeneratorMIPSShared::useByteOpRegisterAtStart(MDefinition* mir) {
  return useRegisterAtStart(mir);
}
     29 
// Byte-op variant that also admits non-double constants; delegates to the
// generic policy since MIPS imposes no byte-register restriction.
LAllocation LIRGeneratorMIPSShared::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useRegisterOrNonDoubleConstant(mir);
}
     34 
// Any temp register can serve as a byte-op temp on MIPS.
LDefinition LIRGeneratorMIPSShared::tempByteOpRegister() { return temp(); }
     36 
// x = !y
// Lower a unary ALU operation: one register input, one register output.
void LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
                                         MDefinition* mir, MDefinition* input) {
  // Unary ALU operations don't read the input after writing to the output, even
  // for fallible operations, so we can use at-start allocations.
  ins->setOperand(0, useRegisterAtStart(input));
  define(ins, mir);
}
     45 
// z = x+y
// Lower a binary ALU operation; the rhs may fold to a constant operand.
void LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
                                         MDefinition* mir, MDefinition* lhs,
                                         MDefinition* rhs) {
  // Binary ALU operations don't read any input after writing to the output,
  // even for fallible operations, so we can use at-start allocations.
  ins->setOperand(0, useRegisterAtStart(lhs));
  ins->setOperand(1, useRegisterOrConstantAtStart(rhs));
  define(ins, mir);
}
     56 
// Lower a unary int64 ALU operation. INT64_PIECES abstracts over whether an
// int64 occupies one register (mips64) or a pair (mips32).
void LIRGeneratorMIPSShared::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
    MDefinition* input) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
  defineInt64(ins, mir);
}
     63 
// Lower a binary int64 ALU operation; the rhs operand slot starts at
// INT64_PIECES since the lhs occupies pieces [0, INT64_PIECES).
void LIRGeneratorMIPSShared::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64RegisterOrConstantAtStart(rhs));
  defineInt64(ins, mir);
}
     71 
// 64-bit multiply needs no extra constraints on MIPS; reuse the generic
// binary int64 ALU lowering.
void LIRGeneratorMIPSShared::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                              MDefinition* lhs,
                                              MDefinition* rhs) {
  lowerForALUInt64(ins, mir, lhs, rhs);
}
     77 
// Lower a 64-bit shift or rotate. LShiftI64 and LRotateI64 expose different
// setter names for the same two operands, hence the compile-time dispatch.
// The shift amount is a plain 32-bit register-or-constant in both cases.
template <class LInstr>
void LIRGeneratorMIPSShared::lowerForShiftInt64(LInstr* ins, MDefinition* mir,
                                                MDefinition* lhs,
                                                MDefinition* rhs) {
  if constexpr (std::is_same_v<LInstr, LShiftI64>) {
    ins->setLhs(useInt64RegisterAtStart(lhs));
    ins->setRhs(useRegisterOrConstantAtStart(rhs));
  } else {
    ins->setInput(useInt64RegisterAtStart(lhs));
    ins->setCount(useRegisterOrConstantAtStart(rhs));
  }
  defineInt64(ins, mir);
}

// Explicit instantiations for the two users of this template.
template void LIRGeneratorMIPSShared::lowerForShiftInt64(LShiftI64* ins,
                                                         MDefinition* mir,
                                                         MDefinition* lhs,
                                                         MDefinition* rhs);
template void LIRGeneratorMIPSShared::lowerForShiftInt64(LRotateI64* ins,
                                                         MDefinition* mir,
                                                         MDefinition* lhs,
                                                         MDefinition* rhs);
    100 
// Lower a unary floating-point operation: one FP register in, one out.
void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
                                         MDefinition* mir, MDefinition* input) {
  ins->setOperand(0, useRegisterAtStart(input));
  define(ins, mir);
}
    106 
// Lower a binary floating-point operation. Unlike the ALU path, the rhs is
// never folded to a constant: both operands must live in FP registers.
void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
                                         MDefinition* mir, MDefinition* lhs,
                                         MDefinition* rhs) {
  ins->setOperand(0, useRegisterAtStart(lhs));
  ins->setOperand(1, useRegisterAtStart(rhs));
  define(ins, mir);
}
    114 
// Lower a wasm builtin-call truncation (double/float32 -> int32). The
// instance pointer is pinned to InstanceReg because the fallback path calls
// into the runtime builtin.
void LIRGeneratorMIPSShared::lowerWasmBuiltinTruncateToInt32(
    MWasmBuiltinTruncateToInt32* ins) {
  MDefinition* opd = ins->input();
  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

  if (opd->type() == MIRType::Double) {
    define(new (alloc()) LWasmBuiltinTruncateDToInt32(
               useRegister(opd), useFixed(ins->instance(), InstanceReg),
               LDefinition::BogusTemp()),
           ins);
    return;
  }

  // Float32 input.
  define(new (alloc()) LWasmBuiltinTruncateFToInt32(
             useRegister(opd), useFixed(ins->instance(), InstanceReg),
             LDefinition::BogusTemp()),
         ins);
}
    133 
// 32-bit shifts have no special register constraints on MIPS (no CL-style
// fixed shift register), so the generic binary ALU lowering applies.
void LIRGeneratorMIPSShared::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
                                           MDefinition* mir, MDefinition* lhs,
                                           MDefinition* rhs) {
  lowerForALU(ins, mir, lhs, rhs);
}
    139 
// Lower an int32 division. Fallible divisions (zero denominator, negative
// zero, INT_MIN / -1 overflow) get a snapshot so the code can bail out.
void LIRGeneratorMIPSShared::lowerDivI(MDiv* div) {
  // Division instructions are slow. Division by constant denominators can be
  // rewritten to use other instructions.
  if (div->rhs()->isConstant()) {
    int32_t rhs = div->rhs()->toConstant()->toInt32();
    // Check for division by a positive power of two, which is an easy and
    // important case to optimize. Note that other optimizations are also
    // possible; division by negative powers of two can be optimized in a
    // similar manner as positive powers of two, and division by other
    // constants can be optimized by a reciprocal multiplication technique.
    int32_t shift = FloorLog2(rhs);
    // rhs > 0 guards the FloorLog2 result; 1 << shift == rhs holds only for
    // exact powers of two.
    if (rhs > 0 && 1 << shift == rhs) {
      LDivPowTwoI* lir =
          new (alloc()) LDivPowTwoI(useRegister(div->lhs()), temp(), shift);
      if (div->fallible()) {
        assignSnapshot(lir, div->bailoutKind());
      }
      define(lir, div);
      return;
    }
  }

  // General case: both operands in registers, plus a temp.
  LDivI* lir = new (alloc())
      LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
  if (div->fallible()) {
    assignSnapshot(lir, div->bailoutKind());
  }
  define(lir, div);
}
    169 
// Lower an int32 multiply. Fallible multiplies (overflow / negative zero)
// need a snapshot for bailout.
void LIRGeneratorMIPSShared::lowerMulI(MMul* mul, MDefinition* lhs,
                                       MDefinition* rhs) {
  LMulI* lir = new (alloc()) LMulI;
  if (mul->fallible()) {
    assignSnapshot(lir, mul->bailoutKind());
  }

  // Negative zero check reads |lhs| and |rhs| after writing to the output, so
  // we can't use at-start allocations.
  if (mul->canBeNegativeZero() && !rhs->isConstant()) {
    lir->setOperand(0, useRegister(lhs));
    lir->setOperand(1, useRegister(rhs));
    define(lir, mul);
    return;
  }

  // Safe to use the cheaper at-start allocation policy.
  lowerForALU(lir, mul, lhs, rhs);
}
    188 
// Lower an int32 modulus. Constant power-of-two and (2^k)-1 denominators are
// strength-reduced; everything else uses the generic LModI.
void LIRGeneratorMIPSShared::lowerModI(MMod* mod) {
  if (mod->rhs()->isConstant()) {
    int32_t rhs = mod->rhs()->toConstant()->toInt32();
    int32_t shift = FloorLog2(rhs);
    // x % 2^k: reduces to a mask (plus sign handling).
    if (rhs > 0 && 1 << shift == rhs) {
      LModPowTwoI* lir =
          new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
      if (mod->fallible()) {
        assignSnapshot(lir, mod->bailoutKind());
      }
      define(lir, mod);
      return;
    } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
      // x % (2^(k+1) - 1): handled by LModMaskI with two temps.
      LModMaskI* lir = new (alloc())
          LModMaskI(useRegister(mod->lhs()), temp(), temp(), shift + 1);
      if (mod->fallible()) {
        assignSnapshot(lir, mod->bailoutKind());
      }
      define(lir, mod);
      return;
    }
  }
  // General case: both operands in registers.
  auto* lir =
      new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()));

  if (mod->fallible()) {
    assignSnapshot(lir, mod->bailoutKind());
  }
  define(lir, mod);
}
    219 
// Lower a 32-bit wasm select. The output reuses the trueExpr register, so
// trueExpr is at-start while falseExpr/cond must stay live past the write.
void LIRGeneratorMIPSShared::lowerWasmSelectI(MWasmSelect* select) {
  auto* lir = new (alloc())
      LWasmSelect(useRegisterAtStart(select->trueExpr()),
                  useAny(select->falseExpr()), useRegister(select->condExpr()));
  defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
}
    226 
// 64-bit variant of lowerWasmSelectI; same reuse-input scheme.
void LIRGeneratorMIPSShared::lowerWasmSelectI64(MWasmSelect* select) {
  auto* lir = new (alloc()) LWasmSelectI64(
      useInt64RegisterAtStart(select->trueExpr()),
      useInt64(select->falseExpr()), useRegister(select->condExpr()));
  defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
}
    233 
// Build a table-switch LIR node; the extra temp holds the jump-table address.
LTableSwitch* LIRGeneratorMIPSShared::newLTableSwitch(
    const LAllocation& in, const LDefinition& inputCopy) {
  return new (alloc()) LTableSwitch(in, inputCopy, temp());
}
    238 
// Value-input table switch: needs an int temp, a double temp for unboxing,
// and a jump-table temp.
LTableSwitchV* LIRGeneratorMIPSShared::newLTableSwitchV(
    const LBoxAllocation& in) {
  return new (alloc()) LTableSwitchV(in, temp(), tempDouble(), temp());
}
    243 
// Lower x >>> y producing a double (the uint32 result may exceed INT32_MAX).
void LIRGeneratorMIPSShared::lowerUrshD(MUrsh* mir) {
  MDefinition* lhs = mir->lhs();
  MDefinition* rhs = mir->rhs();

  MOZ_ASSERT(lhs->type() == MIRType::Int32);
  MOZ_ASSERT(rhs->type() == MIRType::Int32);

  auto* lir = new (alloc()) LUrshD(useRegisterAtStart(lhs),
                                   useRegisterOrConstantAtStart(rhs), temp());
  define(lir, mir);
}
    255 
// Lower base**power where base is a constant power of two; reduces to a
// shift. Always fallible (negative/overflowing exponents bail out).
void LIRGeneratorMIPSShared::lowerPowOfTwoI(MPow* mir) {
  int32_t base = mir->input()->toConstant()->toInt32();
  MDefinition* power = mir->power();

  auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
  assignSnapshot(lir, mir->bailoutKind());
  define(lir, mir);
}
    264 
// Lower a BigInt pointer-sized left shift; fallible (overflow bails out).
void LIRGeneratorMIPSShared::lowerBigIntPtrLsh(MBigIntPtrLsh* ins) {
  auto* lir = new (alloc()) LBigIntPtrLsh(
      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
  assignSnapshot(lir, ins->bailoutKind());
  define(lir, ins);
}
    271 
// Lower a BigInt pointer-sized right shift; mirrors lowerBigIntPtrLsh.
void LIRGeneratorMIPSShared::lowerBigIntPtrRsh(MBigIntPtrRsh* ins) {
  auto* lir = new (alloc()) LBigIntPtrRsh(
      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp());
  assignSnapshot(lir, ins->bailoutKind());
  define(lir, ins);
}
    278 
// Lower a BigInt pointer-sized division; no temps needed on MIPS.
void LIRGeneratorMIPSShared::lowerBigIntPtrDiv(MBigIntPtrDiv* ins) {
  auto* lir = new (alloc())
      LBigIntPtrDiv(useRegister(ins->lhs()), useRegister(ins->rhs()),
                    LDefinition::BogusTemp(), LDefinition::BogusTemp());
  assignSnapshot(lir, ins->bailoutKind());
  define(lir, ins);
}
    286 
// Lower a BigInt pointer-sized modulus; a snapshot is only needed when the
// denominator can be zero.
void LIRGeneratorMIPSShared::lowerBigIntPtrMod(MBigIntPtrMod* ins) {
  auto* lir = new (alloc())
      LBigIntPtrMod(useRegister(ins->lhs()), useRegister(ins->rhs()), temp(),
                    LDefinition::BogusTemp());
  if (ins->canBeDivideByZero()) {
    assignSnapshot(lir, ins->bailoutKind());
  }
  define(lir, ins);
}
    296 
// Lower a wasm heap load. Dispatches on access width (int64 vs. 32-bit or
// smaller) and alignment (unaligned accesses need a temp for the piecewise
// load sequence).
void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
  // sign-extended on mips64 platform and we should explicitly promote it to
  // 64-bit by zero-extension when use it as an index register in memory
  // accesses.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  // Use an explicit memory base if the instruction carries one, otherwise
  // the dedicated HeapReg.
  LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  LAllocation ptr = useRegisterAtStart(base);

  // A nonzero access offset requires a scratch copy of the pointer so the
  // offset can be folded in without clobbering 'base'.
  LDefinition ptrCopy = LDefinition::BogusTemp();
  if (ins->access().offset32()) {
    ptrCopy = tempCopy(base, 0);
  }

  if (ins->access().type() == Scalar::Int64) {
    if (IsUnaligned(ins->access())) {
      auto* lir =
          new (alloc()) LWasmUnalignedLoadI64(ptr, memoryBase, ptrCopy, temp());
      defineInt64(lir, ins);
      return;
    }

    auto* lir = new (alloc()) LWasmLoadI64(ptr, memoryBase, ptrCopy);
    defineInt64(lir, ins);
    return;
  }

  if (IsUnaligned(ins->access())) {
    auto* lir =
        new (alloc()) LWasmUnalignedLoad(ptr, memoryBase, ptrCopy, temp());
    define(lir, ins);
    return;
  }

  auto* lir = new (alloc()) LWasmLoad(ptr, memoryBase, ptrCopy);
  define(lir, ins);
}
    339 
// Lower a wasm heap store; structure mirrors visitWasmLoad (int64 vs. narrow,
// aligned vs. unaligned), but the node is add()ed since a store produces no
// value.
void LIRGenerator::visitWasmStore(MWasmStore* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  MDefinition* value = ins->value();
  LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  LAllocation baseAlloc = useRegisterAtStart(base);

  // Scratch copy of the pointer for accesses with a nonzero offset.
  LDefinition ptrCopy = LDefinition::BogusTemp();
  if (ins->access().offset32()) {
    ptrCopy = tempCopy(base, 0);
  }

  if (ins->access().type() == Scalar::Int64) {
    LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);

    if (IsUnaligned(ins->access())) {
      auto* lir = new (alloc()) LWasmUnalignedStoreI64(
          baseAlloc, valueAlloc, memoryBase, ptrCopy, temp());
      add(lir, ins);
      return;
    }

    auto* lir =
        new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc, memoryBase, ptrCopy);
    add(lir, ins);
    return;
  }

  LAllocation valueAlloc = useRegisterAtStart(value);

  if (IsUnaligned(ins->access())) {
    auto* lir = new (alloc())
        LWasmUnalignedStore(baseAlloc, valueAlloc, memoryBase, ptrCopy, temp());
    add(lir, ins);
    return;
  }

  auto* lir =
      new (alloc()) LWasmStore(baseAlloc, valueAlloc, memoryBase, ptrCopy);
  add(lir, ins);
}
    386 
    387 void LIRGeneratorMIPSShared::lowerUDiv(MDiv* div) {
    388  MDefinition* lhs = div->getOperand(0);
    389  MDefinition* rhs = div->getOperand(1);
    390 
    391  LUDivOrMod* lir = new (alloc()) LUDivOrMod;
    392  lir->setOperand(0, useRegister(lhs));
    393  lir->setOperand(1, useRegister(rhs));
    394  if (div->fallible()) {
    395    assignSnapshot(lir, div->bailoutKind());
    396  }
    397 
    398  define(lir, div);
    399 }
    400 
    401 void LIRGeneratorMIPSShared::lowerUMod(MMod* mod) {
    402  MDefinition* lhs = mod->getOperand(0);
    403  MDefinition* rhs = mod->getOperand(1);
    404 
    405  LUDivOrMod* lir = new (alloc()) LUDivOrMod;
    406  lir->setOperand(0, useRegister(lhs));
    407  lir->setOperand(1, useRegister(rhs));
    408  if (mod->fallible()) {
    409    assignSnapshot(lir, mod->bailoutKind());
    410  }
    411 
    412  define(lir, mod);
    413 }
    414 
    415 void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
    416  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    417  LWasmUint32ToDouble* lir =
    418      new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
    419  define(lir, ins);
    420 }
    421 
    422 void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
    423  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    424  LWasmUint32ToFloat32* lir =
    425      new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
    426  define(lir, ins);
    427 }
    428 
// Lower an asm.js heap load. Constant positive indices without a bounds
// check may be encoded directly; otherwise the base (and, if checking, the
// limit) go in registers.
void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
  MOZ_ASSERT(ins->access().offset32() == 0);

  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);
  LAllocation baseAlloc;
  LAllocation limitAlloc;
  // For MIPS it is best to keep the 'base' in a register if a bounds check
  // is needed.
  if (base->isConstant() && !ins->needsBoundsCheck()) {
    // A bounds check is only skipped for a positive index.
    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
    baseAlloc = LAllocation(base->toConstant());
  } else {
    baseAlloc = useRegisterAtStart(base);
    if (ins->needsBoundsCheck()) {
      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
      limitAlloc = useRegisterAtStart(boundsCheckLimit);
    }
  }

  // Third allocation (memory base) is unused here.
  define(new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation()),
         ins);
}
    454 
// Lower an asm.js heap store; mirrors visitAsmJSLoadHeap's base/limit
// allocation strategy, with the stored value in a register.
void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
  MOZ_ASSERT(ins->access().offset32() == 0);

  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);
  LAllocation baseAlloc;
  LAllocation limitAlloc;
  if (base->isConstant() && !ins->needsBoundsCheck()) {
    // A bounds check is only skipped for a positive index.
    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
    baseAlloc = LAllocation(base->toConstant());
  } else {
    baseAlloc = useRegisterAtStart(base);
    if (ins->needsBoundsCheck()) {
      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
      limitAlloc = useRegisterAtStart(boundsCheckLimit);
    }
  }

  add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
                                    limitAlloc, LAllocation()),
      ins);
}
    478 
// Lower a string substring operation; may allocate (new string), hence the
// safepoint.
void LIRGenerator::visitSubstr(MSubstr* ins) {
  LSubstr* lir = new (alloc())
      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
              useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
  define(lir, ins);
  assignSafepoint(lir, ins);
}
    486 
// Lower a typed-array compareExchange. BigInt arrays take the 64-bit path;
// sub-word (8/16-bit) element types need value/offset/mask temps so codegen
// can synthesize the narrow atomic from a word-sized LL/SC sequence.
void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(!Scalar::isFloatingType(ins->arrayType()));
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Allocation oldval = useInt64Register(ins->oldval());
    LInt64Allocation newval = useInt64Register(ins->newval());

    auto* lir = new (alloc())
        LCompareExchangeTypedArrayElement64(elements, index, oldval, newval);
    defineInt64(lir, ins);
    return;
  }

  const LAllocation oldval = useRegister(ins->oldval());
  const LAllocation newval = useRegister(ins->newval());

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  LDefinition outTemp = LDefinition::BogusTemp();
  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  // Uint32 results observed as doubles need an intermediate integer temp.
  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    outTemp = temp();
  }

  // Sub-word accesses need shift/mask scratch registers.
  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  LCompareExchangeTypedArrayElement* lir = new (alloc())
      LCompareExchangeTypedArrayElement(elements, index, oldval, newval,
                                        outTemp, valueTemp, offsetTemp,
                                        maskTemp);

  define(lir, ins);
}
    535 
// Lower a typed-array atomic exchange; same BigInt / sub-word temp scheme as
// visitCompareExchangeTypedArrayElement.
void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Allocation value = useInt64Register(ins->value());

    auto* lir = new (alloc())
        LAtomicExchangeTypedArrayElement64(elements, index, value);
    defineInt64(lir, ins);
    return;
  }

  // If the target is a floating register then we need a temp at the
  // CodeGenerator level for creating the result.

  MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);

  const LAllocation value = useRegister(ins->value());

  LDefinition outTemp = LDefinition::BogusTemp();
  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  // A Uint32 exchange is observed as a Double; stage the raw value in a temp.
  if (ins->arrayType() == Scalar::Uint32) {
    MOZ_ASSERT(ins->type() == MIRType::Double);
    outTemp = temp();
  }

  // Sub-word accesses need shift/mask scratch registers.
  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  LAtomicExchangeTypedArrayElement* lir =
      new (alloc()) LAtomicExchangeTypedArrayElement(
          elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);

  define(lir, ins);
}
    583 
// Lower a wasm heap compareExchange. Int64 accesses use the dedicated
// 64-bit node; narrower-than-word accesses need shift/mask temps.
void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
  LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmCompareExchangeI64(
        useRegister(base), useInt64Register(ins->oldValue()),
        useInt64Register(ins->newValue()), memoryBase);
    defineInt64(lir, ins);
    return;
  }

  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  // Sub-word accesses need shift/mask scratch registers.
  if (ins->access().byteSize() < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  auto* lir = new (alloc())
      LWasmCompareExchangeHeap(useRegister(base), useRegister(ins->oldValue()),
                               useRegister(ins->newValue()), memoryBase,
                               valueTemp, offsetTemp, maskTemp);

  define(lir, ins);
}
    617 
// Lower a wasm heap atomic exchange; same shape as
// visitWasmCompareExchangeHeap but with a single value operand.
void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
  LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmAtomicExchangeI64(
        useRegister(base), useInt64Register(ins->value()), memoryBase);
    defineInt64(lir, ins);
    return;
  }

  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  // Sub-word accesses need shift/mask scratch registers.
  if (ins->access().byteSize() < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  auto* lir = new (alloc())
      LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()),
                              memoryBase, valueTemp, offsetTemp, maskTemp);
  define(lir, ins);
}
    648 
// Lower a wasm heap atomic read-modify-write. Uses the cheaper ForEffect
// variant when the old value is unobserved; Int64 takes its own path.
void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
  LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc())
        LWasmAtomicBinopI64(useRegister(base), useInt64Register(ins->value()),
                            memoryBase, tempInt64());
    defineInt64(lir, ins);
    return;
  }

  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  // Sub-word accesses need shift/mask scratch registers.
  if (ins->access().byteSize() < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  // Result unused: no output register to define.
  if (!ins->hasUses()) {
    auto* lir = new (alloc()) LWasmAtomicBinopHeapForEffect(
        useRegister(base), useRegister(ins->value()), memoryBase, valueTemp,
        offsetTemp, maskTemp);
    add(lir, ins);
    return;
  }

  auto* lir = new (alloc())
      LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
                           memoryBase, valueTemp, offsetTemp, maskTemp);

  define(lir, ins);
}
    689 
// Lower a typed-array atomic read-modify-write (add/sub/and/or/xor).
// Dispatches on BigInt vs. plain integer element type, and within each on
// whether the old value is observed (ForEffect variants skip the output).
void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
  MOZ_ASSERT(!Scalar::isFloatingType(ins->arrayType()));
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->arrayType());

  if (Scalar::isBigIntType(ins->arrayType())) {
    LInt64Allocation value = useInt64Register(ins->value());
    LInt64Definition temp = tempInt64();

    // Case 1: the result of the operation is not used.

    if (ins->isForEffect()) {
      auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
          elements, index, value, temp);
      add(lir, ins);
      return;
    }

    // Case 2: the result of the operation is used.

    auto* lir = new (alloc())
        LAtomicTypedArrayElementBinop64(elements, index, value, temp);
    defineInt64(lir, ins);
    return;
  }

  LAllocation value = useRegister(ins->value());
  LDefinition valueTemp = LDefinition::BogusTemp();
  LDefinition offsetTemp = LDefinition::BogusTemp();
  LDefinition maskTemp = LDefinition::BogusTemp();

  // Sub-word accesses need shift/mask scratch registers.
  if (Scalar::byteSize(ins->arrayType()) < 4) {
    valueTemp = temp();
    offsetTemp = temp();
    maskTemp = temp();
  }

  if (ins->isForEffect()) {
    LAtomicTypedArrayElementBinopForEffect* lir =
        new (alloc()) LAtomicTypedArrayElementBinopForEffect(
            elements, index, value, valueTemp, offsetTemp, maskTemp);
    add(lir, ins);
    return;
  }

  // For a Uint32Array with a known double result we need a temp for
  // the intermediate output.

  LDefinition outTemp = LDefinition::BogusTemp();

  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
    outTemp = temp();
  }

  LAtomicTypedArrayElementBinop* lir =
      new (alloc()) LAtomicTypedArrayElementBinop(
          elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
  define(lir, ins);
}
    755 
    756 void LIRGenerator::visitCopySign(MCopySign* ins) {
    757  MDefinition* lhs = ins->lhs();
    758  MDefinition* rhs = ins->rhs();
    759 
    760  MOZ_ASSERT(IsFloatingPointType(lhs->type()));
    761  MOZ_ASSERT(lhs->type() == rhs->type());
    762  MOZ_ASSERT(lhs->type() == ins->type());
    763 
    764  LInstructionHelper<1, 2, 0>* lir;
    765  if (lhs->type() == MIRType::Double) {
    766    lir = new (alloc()) LCopySignD();
    767  } else {
    768    lir = new (alloc()) LCopySignF();
    769  }
    770 
    771  lowerForFPU(lir, ins, lhs, rhs);
    772 }
    773 
    774 void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
    775  defineInt64(
    776      new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
    777 }
    778 
    779 void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
    780  defineInt64(new (alloc())
    781                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
    782              ins);
    783 }
    784 
    785 // On mips we specialize the only cases where compare is {U,}Int32 and select
    786 // is {U,}Int32.
    787 bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
    788    MCompare::CompareType compTy, MIRType insTy) {
    789  return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
    790                                     compTy == MCompare::Compare_UInt32);
    791 }
    792 
// Lower a fused compare+select; the output reuses the trueExpr register
// (at-start), while compare operands and falseExpr stay live.
void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
                                                   MDefinition* lhs,
                                                   MDefinition* rhs,
                                                   MCompare::CompareType compTy,
                                                   JSOp jsop) {
  MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
  auto* lir = new (alloc()) LWasmCompareAndSelect(
      useRegister(lhs), useRegister(rhs), useRegisterAtStart(ins->trueExpr()),
      useRegister(ins->falseExpr()), compTy, jsop);
  defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
}
    804 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
  MOZ_CRASH("ternary SIMD NYI");
}
    808 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
  MOZ_CRASH("binary SIMD NYI");
}
    812 
#ifdef ENABLE_WASM_SIMD
// SIMD specializations are all disabled on MIPS: no bitselect-as-shuffle
// rewrite, no relaxed bitselect, no pmaddubsw.
bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
    int8_t shuffle[16]) {
  return false;
}
bool MWasmTernarySimd128::canRelaxBitselect() { return false; }

bool MWasmBinarySimd128::canPmaddubsw() { return false; }
#endif
    822 
// No constant-rhs SIMD specializations on MIPS yet.
bool MWasmBinarySimd128::specializeForConstantRhs() {
  // Probably many we want to do here
  return false;
}
    827 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmBinarySimd128WithConstant(
    MWasmBinarySimd128WithConstant* ins) {
  MOZ_CRASH("binary SIMD with constant NYI");
}
    832 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
  MOZ_CRASH("shift SIMD NYI");
}
    836 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
  MOZ_CRASH("shuffle SIMD NYI");
}
    840 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
  MOZ_CRASH("replace-lane SIMD NYI");
}
    844 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
  MOZ_CRASH("scalar-to-SIMD NYI");
}
    848 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
  MOZ_CRASH("unary SIMD NYI");
}
    852 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
  MOZ_CRASH("reduce-SIMD NYI");
}
    856 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
  MOZ_CRASH("load-lane SIMD NYI");
}
    860 
// SIMD is not yet implemented on MIPS.
void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
  MOZ_CRASH("store-lane SIMD NYI");
}