tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Lowering-x64.cpp (26333B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/x64/Lowering-x64.h"
      8 
      9 #include "mozilla/CheckedInt.h"
     10 #include "mozilla/MathAlgorithms.h"
     11 
     12 #include "jit/Lowering.h"
     13 #include "jit/MIR-wasm.h"
     14 #include "jit/MIR.h"
     15 #include "jit/x64/Assembler-x64.h"
     16 
     17 #include "jit/shared/Lowering-shared-inl.h"
     18 
     19 using namespace js;
     20 using namespace js::jit;
     21 
// Allocates a boxed Value use pinned to a single fixed register.  On x64 a
// Value occupies one 64-bit register, so only |reg1| is meaningful; the
// second Register parameter is ignored (presumably kept for signature parity
// with 32-bit platforms, where a box spans two registers).
LBoxAllocation LIRGeneratorX64::useBoxFixed(MDefinition* mir, Register reg1,
                                            Register, bool useAtStart) {
  MOZ_ASSERT(mir->type() == MIRType::Value);

  ensureDefined(mir);
  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
}
     29 
// On x64, REX-prefixed encodings give every GPR a byte subregister, so byte
// operations need no special register constraint — any register will do.
LAllocation LIRGeneratorX64::useByteOpRegister(MDefinition* mir) {
  return useRegister(mir);
}
     33 
// At-start variant of useByteOpRegister; see the note there — x64 imposes no
// byte-register restriction.
LAllocation LIRGeneratorX64::useByteOpRegisterAtStart(MDefinition* mir) {
  return useRegisterAtStart(mir);
}
     37 
// Like useByteOpRegister, but permits non-double constants; again no special
// byte-register constraint is needed on x64.
LAllocation LIRGeneratorX64::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useRegisterOrNonDoubleConstant(mir);
}
     42 
// Any GPR works as a byte-op temp on x64 (unlike x86-32, which needs one of
// al/bl/cl/dl).
LDefinition LIRGeneratorX64::tempByteOpRegister() { return temp(); }
     44 
// Unboxing a Value on x64 can use any scratch GPR.
LDefinition LIRGeneratorX64::tempToUnbox() { return temp(); }
     46 
// Lowers a unary 64-bit ALU operation.  x64 ALU instructions are
// two-address (the destination is also a source), so the output reuses the
// input's register, and the use must be at-start to permit that aliasing.
void LIRGeneratorX64::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
    MDefinition* input) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
  defineInt64ReuseInput(ins, mir, 0);
}
     53 
// Lowers a binary 64-bit ALU operation.  The output reuses the lhs register
// (two-address ALU ops).  When lhs and rhs may end up as the same LIR node,
// the rhs use must also be at-start so it can legally alias the reused
// at-start lhs register.
void LIRGeneratorX64::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
                                         ? useInt64OrConstant(rhs)
                                         : useInt64OrConstantAtStart(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}
     63 
// Lowers a 64-bit multiply.  The three-operand form of imulq
// (imulq $imm32, src, dst) can write any destination register, so input
// reuse is only required when the rhs is not an int32-encodable immediate.
void LIRGeneratorX64::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                       MDefinition* lhs, MDefinition* rhs) {
  // No input reuse needed when we can use imulq with an int32 immediate.
  bool reuseInput = true;
  if (rhs->isConstant()) {
    int64_t constant = rhs->toConstant()->toInt64();
    // True (reuse needed) when the constant does not round-trip through
    // int32, i.e. it cannot be encoded as an imm32.
    reuseInput = int32_t(constant) != constant;
  }

  ins->setLhs(useInt64RegisterAtStart(lhs));
  // If lhs and rhs may be the same node, the rhs use must be at-start to be
  // compatible with the at-start (possibly reused) lhs.
  ins->setRhs(willHaveDifferentLIRNodes(lhs, rhs)
                  ? useInt64OrConstant(rhs)
                  : useInt64OrConstantAtStart(rhs));
  if (reuseInput) {
    defineInt64ReuseInput(ins, mir, 0);
  } else {
    defineInt64(ins, mir);
  }
}
     83 
// Lowers 64-bit shifts (LShiftI64) and rotates (LRotateI64).
//
// Shifts: with BMI2 (shlx/shrx/sarx) the count may live in any register and
// the result may go to any register.  Without BMI2, legacy shifts take the
// count in cl and are two-address, so the output must reuse the lhs.
//
// Rotates: there is no BMI2 form, so a variable count is pinned to rcx and
// the output always reuses the input.
template <class LInstr>
void LIRGeneratorX64::lowerForShiftInt64(LInstr* ins, MDefinition* mir,
                                         MDefinition* lhs, MDefinition* rhs) {
  if constexpr (std::is_same_v<LInstr, LShiftI64>) {
    LAllocation rhsAlloc;
    if (rhs->isConstant()) {
      rhsAlloc = useOrConstantAtStart(rhs);
    } else if (Assembler::HasBMI2()) {
      // BMI2 shift: any register can hold the count.
      rhsAlloc = useRegisterAtStart(rhs);
    } else {
      // Legacy shift: count must be in cl.
      rhsAlloc = useShiftRegister(rhs);
    }

    ins->setLhs(useInt64RegisterAtStart(lhs));
    ins->setRhs(rhsAlloc);
    if (rhs->isConstant() || !Assembler::HasBMI2()) {
      // Two-address encoding: output reuses the lhs register.
      defineInt64ReuseInput(ins, mir, LShiftI64::LhsIndex);
    } else {
      // BMI2 shifts are three-address; any output register works.
      defineInt64(ins, mir);
    }
  } else {
    LAllocation rhsAlloc;
    if (rhs->isConstant()) {
      rhsAlloc = useOrConstantAtStart(rhs);
    } else {
      // Variable rotate counts must be in rcx.
      rhsAlloc = useFixed(rhs, rcx);
    }

    ins->setInput(useInt64RegisterAtStart(lhs));
    ins->setCount(rhsAlloc);
    defineInt64ReuseInput(ins, mir, LRotateI64::InputIndex);
  }
}

template void LIRGeneratorX64::lowerForShiftInt64(LShiftI64* ins,
                                                  MDefinition* mir,
                                                  MDefinition* lhs,
                                                  MDefinition* rhs);
template void LIRGeneratorX64::lowerForShiftInt64(LRotateI64* ins,
                                                  MDefinition* mir,
                                                  MDefinition* lhs,
                                                  MDefinition* rhs);
    126 
    127 void LIRGenerator::visitBox(MBox* box) {
    128  MDefinition* opd = box->getOperand(0);
    129 
    130  // If the operand is a constant, emit near its uses.
    131  if (opd->isConstant() && box->canEmitAtUses()) {
    132    emitAtUses(box);
    133    return;
    134  }
    135 
    136  if (opd->isConstant()) {
    137    define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
    138           LDefinition(LDefinition::BOX));
    139  } else {
    140    LBox* ins = new (alloc()) LBox(useRegisterAtStart(opd), opd->type());
    141    define(ins, box, LDefinition(LDefinition::BOX));
    142  }
    143 }
    144 
    145 void LIRGenerator::visitUnbox(MUnbox* unbox) {
    146  MDefinition* box = unbox->getOperand(0);
    147  MOZ_ASSERT(box->type() == MIRType::Value);
    148 
    149  LInstructionHelper<1, BOX_PIECES, 0>* lir;
    150  if (IsFloatingPointType(unbox->type())) {
    151    MOZ_ASSERT(unbox->type() == MIRType::Double);
    152    lir = new (alloc()) LUnboxFloatingPoint(useBoxAtStart(box));
    153  } else {
    154    lir = new (alloc()) LUnbox(useAtStart(box));
    155  }
    156 
    157  if (unbox->fallible()) {
    158    assignSnapshot(lir, unbox->bailoutKind());
    159  }
    160 
    161  define(lir, unbox);
    162 }
    163 
    164 void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
    165  MOZ_ASSERT(opd->type() == MIRType::Value);
    166 
    167  LReturn* ins = new (alloc()) LReturn(isGenerator);
    168  ins->setOperand(0, useFixed(opd, JSReturnReg));
    169  add(ins);
    170 }
    171 
// On x64 a Value phi occupies a single register, so untyped phi inputs go
// through the ordinary typed-phi path.
void LIRGeneratorX64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
                                           LBlock* block, size_t lirIndex) {
  lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}
    176 
// Int64 fits in one register on x64, so an Int64 phi is just a typed phi.
void LIRGeneratorX64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  defineTypedPhi(phi, lirIndex);
}
    180 
// See defineInt64Phi: Int64 phi inputs use the single-register typed path.
void LIRGeneratorX64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                         LBlock* block, size_t lirIndex) {
  lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}
    185 
// Lowers compareExchange on a typed-array element.  BigInt64/BigUint64
// arrays take a dedicated 64-bit path; all other element types go through
// the shared lowering.
void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LInt64Allocation oldval = useInt64Register(ins->oldval());
    LInt64Allocation newval = useInt64Register(ins->newval());

    auto* lir = new (alloc())
        LCompareExchangeTypedArrayElement64(elements, index, oldval, newval);
    // CMPXCHG implicitly uses rax for the expected value and leaves the
    // observed value there, so pin the output to rax.
    defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(rax))));
    return;
  }

  // x64 has no byte-register restriction (unlike x86-32).
  lowerCompareExchangeTypedArrayElement(ins,
                                        /* useI386ByteRegisters = */ false);
}
    207 
// Lowers atomic exchange on a typed-array element.  BigInt64/BigUint64
// arrays use the 64-bit LIR node; unlike compare-exchange, no fixed output
// register is required here.
void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LInt64Allocation value = useInt64Register(ins->value());

    auto* lir = new (alloc())
        LAtomicExchangeTypedArrayElement64(elements, index, value);
    defineInt64(lir, ins);
    return;
  }

  // x64 has no byte-register restriction (unlike x86-32).
  lowerAtomicExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ false);
}
    227 
// Lowers an atomic read-modify-write (add/sub/and/or/xor) on a typed-array
// element.  BigInt64/BigUint64 arrays take the 64-bit path below; other
// element types use the shared lowering.
void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());

    // Case 1: the result of the operation is not used.

    if (ins->isForEffect()) {
      LInt64Allocation value = useInt64Register(ins->value());

      auto* lir = new (alloc())
          LAtomicTypedArrayElementBinopForEffect64(elements, index, value);
      add(lir, ins);
      return;
    }

    // Case 2: the result of the operation is used.
    //
    // For ADD and SUB we'll use XADD.
    //
    // For AND/OR/XOR we need to use a CMPXCHG loop with rax as the output.

    bool bitOp = !(ins->operation() == AtomicOp::Add ||
                   ins->operation() == AtomicOp::Sub);

    LInt64Allocation value;
    LInt64Definition temp;
    if (bitOp) {
      // CMPXCHG loop: value stays live across iterations and a scratch
      // register holds the updated value.
      value = useInt64Register(ins->value());
      temp = tempInt64();
    } else {
      // XADD: the value register is overwritten with the old value, so use
      // it at-start and reuse it as the output; no temp needed.
      value = useInt64RegisterAtStart(ins->value());
      temp = LInt64Definition::BogusTemp();
    }

    auto* lir = new (alloc())
        LAtomicTypedArrayElementBinop64(elements, index, value, temp);
    if (bitOp) {
      // CMPXCHG implicitly produces its result in rax.
      defineInt64Fixed(lir, ins,
                       LInt64Allocation(LAllocation(AnyRegister(rax))));
    } else {
      // Operand 2 is the value; XADD leaves the old value there.
      defineInt64ReuseInput(lir, ins, 2);
    }
    return;
  }

  lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ false);
}
    281 
    282 void LIRGeneratorX64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
    283  const LUse elements = useRegister(ins->elements());
    284  const LAllocation index =
    285      useRegisterOrIndexConstant(ins->index(), ins->storageType());
    286 
    287  auto* lir = new (alloc()) LAtomicLoad64(elements, index);
    288  defineInt64(lir, ins);
    289 }
    290 
    291 void LIRGeneratorX64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
    292  LUse elements = useRegister(ins->elements());
    293  LAllocation index =
    294      useRegisterOrIndexConstant(ins->index(), ins->writeType());
    295  LInt64Allocation value = useInt64Register(ins->value());
    296 
    297  add(new (alloc()) LAtomicStore64(elements, index, value), ins);
    298 }
    299 
    300 void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
    301  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    302  LWasmUint32ToDouble* lir =
    303      new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
    304  define(lir, ins);
    305 }
    306 
    307 void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
    308  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    309  LWasmUint32ToFloat32* lir =
    310      new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
    311  define(lir, ins);
    312 }
    313 
// Lowers a wasm heap load.  Int64 results need the Int64 define path; all
// other result types use the plain LWasmLoad.
void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  // 'base' is a GPR but may be of either type.  If it is 32-bit it is
  // zero-extended and can act as 64-bit.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  // Use an explicit memory base when the instruction carries one; otherwise
  // the access is relative to the dedicated HeapReg.
  LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  if (ins->type() != MIRType::Int64) {
    auto* lir =
        new (alloc()) LWasmLoad(useRegisterOrZeroAtStart(base), memoryBase);
    define(lir, ins);
    return;
  }

  auto* lir =
      new (alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base), memoryBase);
  defineInt64(lir, ins);
}
    335 
    336 static bool CanUseInt32OrInt64Constant(MDefinition* value) {
    337  MOZ_ASSERT(IsIntType(value->type()));
    338  if (!value->isConstant()) {
    339    return false;
    340  }
    341  if (value->type() == MIRType::Int64) {
    342    // Immediate needs to fit into int32 for direct to memory move on x64.
    343    return mozilla::CheckedInt32(value->toConstant()->toInt64()).isValid();
    344  }
    345  MOZ_ASSERT(value->type() == MIRType::Int32);
    346  return true;
    347 }
    348 
// Lowers a wasm heap store.  The value allocation policy depends on the
// access width: 8/16/32-bit stores accept any constant, 64-bit stores only
// constants that fit in an imm32, and floats/SIMD always need a register.
void LIRGenerator::visitWasmStore(MWasmStore* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  MDefinition* value = ins->value();
  LAllocation valueAlloc;
  switch (ins->access().type()) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
    case Scalar::Uint32:
      valueAlloc = useRegisterOrConstantAtStart(value);
      break;
    case Scalar::Int64:
      // Only int32-encodable immediates can be stored directly to memory.
      if (CanUseInt32OrInt64Constant(value)) {
        valueAlloc = useOrConstantAtStart(value);
      } else {
        valueAlloc = useRegisterAtStart(value);
      }
      break;
    case Scalar::Float32:
    case Scalar::Float64:
      valueAlloc = useRegisterAtStart(value);
      break;
    case Scalar::Simd128:
#ifdef ENABLE_WASM_SIMD
      valueAlloc = useRegisterAtStart(value);
      break;
#else
      MOZ_CRASH("unexpected array type");
#endif
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::Uint8Clamped:
    case Scalar::Float16:
    case Scalar::MaxTypedArrayViewType:
      // These element types never reach wasm stores.
      MOZ_CRASH("unexpected array type");
  }

  LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
  // Explicit memory base if present, else the dedicated HeapReg.
  LAllocation memoryBaseAlloc =
      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
                           : LGeneralReg(HeapReg);
  auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc, memoryBaseAlloc);
  add(lir, ins);
}
    398 
// Lowers a wasm heap compare-exchange.  CMPXCHG implicitly uses eax/rax for
// the expected value and leaves the observed value there, hence the fixed
// output.
void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  // The output may not be used but will be clobbered regardless, so
  // pin the output to eax.
  //
  // The input values must both be in registers.

  const LAllocation oldval = useRegister(ins->oldValue());
  const LAllocation newval = useRegister(ins->newValue());
  const LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegister(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  LWasmCompareExchangeHeap* lir = new (alloc())
      LWasmCompareExchangeHeap(useRegister(base), oldval, newval, memoryBase);

  defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}
    420 
// Lowers a wasm heap atomic exchange (XCHG-style); no fixed output register
// is required, unlike compare-exchange.
void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(ins->base()->type() == MIRType::Int32 ||
             ins->base()->type() == MIRType::Int64);

  const LAllocation base = useRegister(ins->base());
  const LAllocation value = useRegister(ins->value());
  const LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegister(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  // The output may not be used but will be clobbered regardless,
  // so ignore the case where we're not using the value and just
  // use the output register as a temp.

  LWasmAtomicExchangeHeap* lir =
      new (alloc()) LWasmAtomicExchangeHeap(base, value, memoryBase);
  define(lir, ins);
}
    440 
// Lowers a wasm heap atomic read-modify-write (add/sub/and/or/xor).  The
// strategy mirrors visitAtomicTypedArrayElementBinop: a single LOCK-prefixed
// instruction when the result is unused, XADD for add/sub, and a CMPXCHG
// loop (output pinned to rax) for the bit operations.
void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
  MDefinition* base = ins->base();
  // See comment in visitWasmLoad re the type of 'base'.
  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);

  const LAllocation memoryBase =
      ins->hasMemoryBase() ? LAllocation(useRegister(ins->memoryBase()))
                           : LGeneralReg(HeapReg);

  // No support for 64-bit operations with constants at the masm level.

  bool canTakeConstant = ins->access().type() != Scalar::Int64;

  // Case 1: the result of the operation is not used.
  //
  // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
  // LOCK OR, or LOCK XOR.

  if (!ins->hasUses()) {
    LAllocation value = canTakeConstant ? useRegisterOrConstant(ins->value())
                                        : useRegister(ins->value());
    auto* lir = new (alloc())
        LWasmAtomicBinopHeapForEffect(useRegister(base), value, memoryBase);
    add(lir, ins);
    return;
  }

  // Case 2: the result of the operation is used.
  //
  // For ADD and SUB we'll use XADD with word and byte ops as
  // appropriate.  Any output register can be used and if value is a
  // register it's best if it's the same as output:
  //
  //    movl       value, output  ; if value != output
  //    lock xaddl output, mem
  //
  // For AND/OR/XOR we need to use a CMPXCHG loop, and the output is
  // always in rax:
  //
  //    movl          *mem, rax
  // L: mov           rax, temp
  //    andl          value, temp
  //    lock cmpxchg  temp, mem  ; reads rax also
  //    jnz           L
  //    ; result in rax
  //
  // Note the placement of L, cmpxchg will update rax with *mem if
  // *mem does not have the expected value, so reloading it at the
  // top of the loop would be redundant.

  bool bitOp =
      !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
  bool reuseInput = false;
  LAllocation value;

  if (bitOp || ins->value()->isConstant()) {
    value = canTakeConstant ? useRegisterOrConstant(ins->value())
                            : useRegister(ins->value());
  } else {
    // XADD with a register value: reuse it as the output (see above).
    reuseInput = true;
    value = useRegisterAtStart(ins->value());
  }

  auto* lir = new (alloc())
      LWasmAtomicBinopHeap(useRegister(base), value, memoryBase,
                           bitOp ? temp() : LDefinition::BogusTemp());

  if (reuseInput) {
    defineReuseInput(lir, ins, LWasmAtomicBinopHeap::ValueIndex);
  } else if (bitOp) {
    // CMPXCHG loop result lands in rax.
    defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
  } else {
    define(lir, ins);
  }
}
    516 
    517 void LIRGenerator::visitSubstr(MSubstr* ins) {
    518  LSubstr* lir = new (alloc())
    519      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
    520              useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
    521  define(lir, ins);
    522  assignSafepoint(lir, ins);
    523 }
    524 
// Lowers signed 64-bit division.
void LIRGeneratorX64::lowerDivI64(MDiv* div) {
  // Division instructions are slow. Division by constant denominators can be
  // rewritten to use other instructions.
  if (div->rhs()->isConstant()) {
    int64_t rhs = div->rhs()->toConstant()->toInt64();

    // Division by powers of two can be done by shifting, and division by
    // other numbers can be done by a reciprocal multiplication technique.
    if (mozilla::IsPowerOfTwo(mozilla::Abs(rhs))) {
      int32_t shift = mozilla::FloorLog2(mozilla::Abs(rhs));
      LAllocation lhs = useRegisterAtStart(div->lhs());

      // We have to round the result toward 0 when the remainder is non-zero.
      // This requires an extra register to round up/down when the left-hand
      // side is signed.
      LAllocation lhsCopy = div->canBeNegativeDividend()
                                ? useRegister(div->lhs())
                                : LAllocation();

      auto* lir = new (alloc()) LDivPowTwoI64(lhs, lhsCopy, shift, rhs < 0);
      defineReuseInput(lir, div, 0);
      return;
    }

    // Reciprocal multiplication: rax is clobbered as a scratch temp and the
    // result is produced in rdx (presumably the high half of a widening
    // multiply — see the matching codegen).
    auto* lir = new (alloc())
        LDivConstantI64(useRegister(div->lhs()), tempFixed(rax), rhs);
    defineFixed(lir, div, LAllocation(AnyRegister(rdx)));
    return;
  }

  // idivq: dividend in rdx:rax, quotient in rax, remainder in rdx.  rdx is
  // listed as a fixed temp so the allocator knows it is clobbered.
  auto* lir = new (alloc()) LDivI64(useFixedAtStart(div->lhs(), rax),
                                    useRegister(div->rhs()), tempFixed(rdx));
  defineFixed(lir, div, LAllocation(AnyRegister(rax)));
}
    559 
// Lowers signed 64-bit modulus; mirrors lowerDivI64 but takes the remainder.
void LIRGeneratorX64::lowerModI64(MMod* mod) {
  if (mod->rhs()->isConstant()) {
    int64_t rhs = mod->rhs()->toConstant()->toInt64();

    // Power-of-two modulus reduces to masking/shifting.
    if (mozilla::IsPowerOfTwo(mozilla::Abs(rhs))) {
      int32_t shift = mozilla::FloorLog2(mozilla::Abs(rhs));

      auto* lir =
          new (alloc()) LModPowTwoI64(useRegisterAtStart(mod->lhs()), shift);
      defineReuseInput(lir, mod, 0);
      return;
    }

    // Reciprocal-multiplication path: rdx is clobbered as a temp, result in
    // rax (the mirror image of LDivConstantI64).
    auto* lir = new (alloc())
        LModConstantI64(useRegister(mod->lhs()), tempFixed(rdx), rhs);
    defineFixed(lir, mod, LAllocation(AnyRegister(rax)));
    return;
  }

  // idivq: dividend in rdx:rax, remainder in rdx.  rax is listed as a fixed
  // temp so the allocator knows it is clobbered even though the output is rdx.
  auto* lir = new (alloc()) LModI64(useFixedAtStart(mod->lhs(), rax),
                                    useRegister(mod->rhs()), tempFixed(rax));
  defineFixed(lir, mod, LAllocation(AnyRegister(rdx)));
}
    583 
// Lowers unsigned 64-bit division; like lowerDivI64 without sign handling.
void LIRGeneratorX64::lowerUDivI64(MDiv* div) {
  if (div->rhs()->isConstant()) {
    // NOTE: the result of toInt64 is coerced to uint64_t.
    uint64_t rhs = div->rhs()->toConstant()->toInt64();

    // Unsigned power-of-two division is a plain right shift — no rounding
    // fixup (hence no lhs copy, negativeDivisor = false).
    if (mozilla::IsPowerOfTwo(rhs)) {
      int32_t shift = mozilla::FloorLog2(rhs);

      auto* lir = new (alloc()) LDivPowTwoI64(useRegisterAtStart(div->lhs()),
                                              LAllocation(), shift, false);
      defineReuseInput(lir, div, 0);
      return;
    }

    // Reciprocal multiplication: rax clobbered as a temp, result in rdx.
    auto* lir = new (alloc())
        LUDivConstantI64(useRegister(div->lhs()), tempFixed(rax), rhs);
    defineFixed(lir, div, LAllocation(AnyRegister(rdx)));
    return;
  }

  // divq: dividend in rdx:rax, quotient in rax, remainder in rdx (rdx is a
  // clobbered fixed temp).
  auto* lir = new (alloc()) LUDivI64(useFixedAtStart(div->lhs(), rax),
                                     useRegister(div->rhs()), tempFixed(rdx));
  defineFixed(lir, div, LAllocation(AnyRegister(rax)));
}
    608 
// Lowers unsigned 64-bit modulus; mirrors lowerUDivI64 but takes the
// remainder.
void LIRGeneratorX64::lowerUModI64(MMod* mod) {
  if (mod->rhs()->isConstant()) {
    // NOTE: the result of toInt64 is coerced to uint64_t.
    uint64_t rhs = mod->rhs()->toConstant()->toInt64();

    // Power-of-two modulus reduces to masking.
    if (mozilla::IsPowerOfTwo(rhs)) {
      int32_t shift = mozilla::FloorLog2(rhs);

      auto* lir =
          new (alloc()) LModPowTwoI64(useRegisterAtStart(mod->lhs()), shift);
      defineReuseInput(lir, mod, 0);
      return;
    }

    // Reciprocal-multiplication path: rdx clobbered as a temp, result in rax.
    auto* lir = new (alloc())
        LUModConstantI64(useRegister(mod->lhs()), tempFixed(rdx), rhs);
    defineFixed(lir, mod, LAllocation(AnyRegister(rax)));
    return;
  }

  // divq: dividend in rdx:rax, remainder in rdx.  rax is listed as a fixed
  // temp so the allocator knows it is clobbered even though the output is rdx.
  auto* lir = new (alloc()) LUModI64(useFixedAtStart(mod->lhs(), rax),
                                     useRegister(mod->rhs()), tempFixed(rax));
  defineFixed(lir, mod, LAllocation(AnyRegister(rdx)));
}
    633 
// x64 lowers 64-bit division inline (see lowerDivI64); the runtime-call
// fallback exists only for platforms without native 64-bit division.
void LIRGeneratorX64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
  MOZ_CRASH("We don't use runtime div for this architecture");
}
    637 
// x64 lowers 64-bit modulus inline (see lowerModI64); the runtime-call
// fallback exists only for platforms without native 64-bit division.
void LIRGeneratorX64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
  MOZ_CRASH("We don't use runtime mod for this architecture");
}
    641 
// Lowers BigInt pointer-sized division.  idiv writes the quotient to rax and
// clobbers rdx (listed as a fixed temp).  The snapshot is unconditional:
// division can always bail out (e.g. on a zero divisor).
void LIRGeneratorX64::lowerBigIntPtrDiv(MBigIntPtrDiv* ins) {
  auto* lir = new (alloc())
      LBigIntPtrDiv(useRegister(ins->lhs()), useRegister(ins->rhs()),
                    tempFixed(rdx), LDefinition::BogusTemp());
  assignSnapshot(lir, ins->bailoutKind());
  defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
}
    649 
// Lowers BigInt pointer-sized modulus.  idiv leaves the remainder in rdx and
// clobbers rax (listed as a fixed temp).  A snapshot is only needed when the
// divisor can be zero.
void LIRGeneratorX64::lowerBigIntPtrMod(MBigIntPtrMod* ins) {
  auto* lir = new (alloc())
      LBigIntPtrMod(useRegister(ins->lhs()), useRegister(ins->rhs()),
                    tempFixed(rax), LDefinition::BogusTemp());
  if (ins->canBeDivideByZero()) {
    assignSnapshot(lir, ins->bailoutKind());
  }
  defineFixed(lir, ins, LAllocation(AnyRegister(rdx)));
}
    659 
    660 void LIRGeneratorX64::lowerTruncateDToInt32(MTruncateToInt32* ins) {
    661  MDefinition* opd = ins->input();
    662  MOZ_ASSERT(opd->type() == MIRType::Double);
    663 
    664  define(new (alloc()) LTruncateDToInt32(useRegister(opd), tempShift()), ins);
    665 }
    666 
    667 void LIRGeneratorX64::lowerTruncateFToInt32(MTruncateToInt32* ins) {
    668  MDefinition* opd = ins->input();
    669  MOZ_ASSERT(opd->type() == MIRType::Float32);
    670 
    671  LDefinition maybeTemp = LDefinition::BogusTemp();
    672  define(new (alloc()) LTruncateFToInt32(useRegister(opd), maybeTemp), ins);
    673 }
    674 
    675 void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
    676  MDefinition* opd = ins->input();
    677  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
    678 
    679  LDefinition maybeTemp =
    680      ins->isUnsigned() ? tempDouble() : LDefinition::BogusTemp();
    681  defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd), maybeTemp),
    682              ins);
    683 }
    684 
// x64 truncates to Int64 inline (see visitWasmTruncateToInt64); the builtin
// fallback is for other architectures.
void LIRGeneratorX64::lowerWasmBuiltinTruncateToInt64(
    MWasmBuiltinTruncateToInt64* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}
    689 
    690 void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
    691  MDefinition* opd = ins->input();
    692  MOZ_ASSERT(opd->type() == MIRType::Int64);
    693  MOZ_ASSERT(IsFloatingPointType(ins->type()));
    694 
    695  LDefinition maybeTemp = ins->isUnsigned() ? temp() : LDefinition::BogusTemp();
    696  define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp),
    697         ins);
    698 }
    699 
    700 void LIRGeneratorX64::lowerWasmBuiltinTruncateToInt32(
    701    MWasmBuiltinTruncateToInt32* ins) {
    702  MDefinition* opd = ins->input();
    703  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
    704 
    705  if (opd->type() == MIRType::Double) {
    706    define(new (alloc()) LWasmBuiltinTruncateDToInt32(
    707               useRegister(opd), LAllocation(), tempShift()),
    708           ins);
    709    return;
    710  }
    711 
    712  LDefinition maybeTemp = LDefinition::BogusTemp();
    713  define(new (alloc()) LWasmBuiltinTruncateFToInt32(useRegister(opd),
    714                                                    LAllocation(), maybeTemp),
    715         ins);
    716 }
    717 
// x64 converts Int64 to floating point inline (see
// visitInt64ToFloatingPoint); the builtin fallback is for other
// architectures.
void LIRGeneratorX64::lowerBuiltinInt64ToFloatingPoint(
    MBuiltinInt64ToFloatingPoint* ins) {
  MOZ_CRASH("We don't use it for this architecture");
}
    722 
    723 void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
    724  defineInt64(new (alloc()) LExtendInt32ToInt64(useAtStart(ins->input())), ins);
    725 }
    726 
    727 void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
    728  defineInt64(new (alloc())
    729                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
    730              ins);
    731 }
    732 
    733 // On x64 we specialize the cases: compare is {U,}Int{32,64}, and select is
    734 // {U,}Int{32,64}, independently.
    735 bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
    736    MCompare::CompareType compTy, MIRType insTy) {
    737  return (insTy == MIRType::Int32 || insTy == MIRType::Int64) &&
    738         (compTy == MCompare::Compare_Int32 ||
    739          compTy == MCompare::Compare_UInt32 ||
    740          compTy == MCompare::Compare_Int64 ||
    741          compTy == MCompare::Compare_UInt64);
    742 }
    743 
// Fuses a compare with the wasm select that consumes it into a single LIR
// node.  The true-expr is used at-start and reused as the output, so codegen
// only has to conditionally overwrite it with the false-expr (presumably via
// cmov — see the matching CodeGenerator).
void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
                                                   MDefinition* lhs,
                                                   MDefinition* rhs,
                                                   MCompare::CompareType compTy,
                                                   JSOp jsop) {
  MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
  auto* lir = new (alloc()) LWasmCompareAndSelect(
      useRegister(lhs), useAny(rhs), useRegisterAtStart(ins->trueExpr()),
      useAny(ins->falseExpr()), compTy, jsop);
  defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
}