tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Lowering-x86.cpp (32750B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/x86/Lowering-x86.h"
      8 
      9 #include "jit/Lowering.h"
     10 #include "jit/MIR-wasm.h"
     11 #include "jit/MIR.h"
     12 #include "jit/x86/Assembler-x86.h"
     13 
     14 #include "jit/shared/Lowering-shared-inl.h"
     15 
     16 using namespace js;
     17 using namespace js::jit;
     18 
     19 LBoxAllocation LIRGeneratorX86::useBoxFixed(MDefinition* mir, Register reg1,
     20                                            Register reg2, bool useAtStart) {
     21  MOZ_ASSERT(mir->type() == MIRType::Value);
     22  MOZ_ASSERT(reg1 != reg2);
     23 
     24  ensureDefined(mir);
     25  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
     26                        LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
     27 }
     28 
// Allocate |mir| into a byte-addressable register. On x86 only
// eax/ebx/ecx/edx expose a low-byte subregister; this pins the use to eax
// rather than modeling the restricted register class in the allocator.
LAllocation LIRGeneratorX86::useByteOpRegister(MDefinition* mir) {
  return useFixed(mir, eax);
}
     32 
// As useByteOpRegister, but the use is "at start" so the output may reuse
// the same register.
LAllocation LIRGeneratorX86::useByteOpRegisterAtStart(MDefinition* mir) {
  return useFixedAtStart(mir, eax);
}
     36 
// On this platform the "or constant" relaxation in the name is not taken:
// x86 always pins the use to eax (a byte-addressable register), even for
// constants. The name matches the cross-platform LIRGenerator interface.
LAllocation LIRGeneratorX86::useByteOpRegisterOrNonDoubleConstant(
    MDefinition* mir) {
  return useFixed(mir, eax);
}
     41 
     42 LDefinition LIRGeneratorX86::tempByteOpRegister() { return tempFixed(eax); }
     43 
// Lower MBox. On x86 a boxed Value is a (type, payload) register pair, so
// boxing a non-float value mostly just relabels the input's vreg rather
// than moving data.
void LIRGenerator::visitBox(MBox* box) {
  MDefinition* inner = box->getOperand(0);

  // If the box wrapped a double, it needs a new register.
  if (IsFloatingPointType(inner->type())) {
    // Spectre value masking needs an extra scratch register during unboxing
    // paths; allocate it only when the mitigation is enabled.
    LDefinition spectreTemp =
        JitOptions.spectreValueMasking ? temp() : LDefinition::BogusTemp();
    defineBox(new (alloc()) LBoxFloatingPoint(useRegisterAtStart(inner),
                                              tempCopy(inner, 0), spectreTemp,
                                              inner->type()),
              box);
    return;
  }

  // Defer emission to each use site when possible, avoiding a box here.
  if (box->canEmitAtUses()) {
    emitAtUses(box);
    return;
  }

  // Constants can be rematerialized as a full Value directly.
  if (inner->isConstant()) {
    defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
    return;
  }

  LBox* lir = new (alloc()) LBox(use(inner), inner->type());

  // Otherwise, we should not define a new register for the payload portion
  // of the output, so bypass defineBox().
  uint32_t vreg = getVirtualRegister();

  // Note that because we're using BogusTemp(), we do not change the type of
  // the definition. We also do not define the first output as "TYPE",
  // because it has no corresponding payload at (vreg + 1). Also note that
  // although we copy the input's original type for the payload half of the
  // definition, this is only for clarity. BogusTemp() definitions are
  // ignored.
  lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
  lir->setDef(1, LDefinition::BogusTemp());
  box->setVirtualRegister(vreg);
  // addUnchecked: the second definition is bogus, so skip the usual
  // definition/operand checking done by add().
  addUnchecked(lir);
}
     85 
// Lower MUnbox: extract the payload (or a double) from a boxed Value,
// optionally guarding on the type tag (fallible unboxes take a snapshot).
void LIRGenerator::visitUnbox(MUnbox* unbox) {
  MDefinition* inner = unbox->getOperand(0);

  // An unbox on x86 reads in a type tag (either in memory or a register) and
  // a payload. Unlike most instructions consuming a box, we ask for the type
  // second, so that the result can re-use the first input.
  MOZ_ASSERT(inner->type() == MIRType::Value);

  ensureDefined(inner);

  if (IsFloatingPointType(unbox->type())) {
    // Only Double survives to here; Float32 is not a boxed representation.
    MOZ_ASSERT(unbox->type() == MIRType::Double);
    auto* lir = new (alloc()) LUnboxFloatingPoint(useBox(inner));
    if (unbox->fallible()) {
      assignSnapshot(lir, unbox->bailoutKind());
    }
    define(lir, unbox);
    return;
  }

  // Swap the order we use the box pieces so we can re-use the payload register.
  LUnbox* lir = new (alloc()) LUnbox;
  // With Spectre value masking the payload must be masked after the type
  // check, so the output cannot simply alias the payload input — except for
  // Int32/Boolean, whose masked payload is the payload itself.
  bool reusePayloadReg = !JitOptions.spectreValueMasking ||
                         unbox->type() == MIRType::Int32 ||
                         unbox->type() == MIRType::Boolean;
  if (reusePayloadReg) {
    lir->setOperand(0, usePayloadInRegisterAtStart(inner));
    lir->setOperand(1, useType(inner, LUse::ANY));
  } else {
    lir->setOperand(0, usePayload(inner, LUse::REGISTER));
    lir->setOperand(1, useType(inner, LUse::ANY));
  }

  if (unbox->fallible()) {
    assignSnapshot(lir, unbox->bailoutKind());
  }

  // Types and payloads form two separate intervals. If the type becomes dead
  // before the payload, it could be used as a Value without the type being
  // recoverable. Unbox's purpose is to eagerly kill the definition of a type
  // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
  // Instead, we create a new virtual register.
  if (reusePayloadReg) {
    defineReuseInput(lir, unbox, 0);
  } else {
    define(lir, unbox);
  }
}
    134 
// Lower a return of a boxed Value: the type tag goes in JSReturnReg_Type and
// the payload in JSReturnReg_Data, per the JS calling convention.
void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
  MOZ_ASSERT(opd->type() == MIRType::Value);

  LReturn* ins = new (alloc()) LReturn(isGenerator);
  ins->setOperand(0, LUse(JSReturnReg_Type));
  ins->setOperand(1, LUse(JSReturnReg_Data));
  // fillBoxUses wires both halves of |opd| into the two operands above.
  fillBoxUses(ins, 0, opd);
  add(ins);
}
    144 
// Wire one input of an untyped (Value) phi. A Value phi is represented as a
// pair of LPhis — one for the type half, one for the payload half — at
// lirIndex + VREG_TYPE_OFFSET / VREG_DATA_OFFSET respectively.
void LIRGeneratorX86::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
                                           LBlock* block, size_t lirIndex) {
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
  LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
  type->setOperand(
      inputPosition,
      LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
  payload->setOperand(inputPosition,
                      LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
}
    156 
// Define the two LPhis that make up an Int64 phi on 32-bit x86: one for the
// low word and one for the high word, at consecutive virtual registers.
void LIRGeneratorX86::defineInt64Phi(MPhi* phi, size_t lirIndex) {
  LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);

  uint32_t lowVreg = getVirtualRegister();

  // The MIR phi's vreg is the low word's vreg; the high word is found at a
  // fixed offset from it (checked by the assert below).
  phi->setVirtualRegister(lowVreg);

  uint32_t highVreg = getVirtualRegister();
  MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);

  low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
  high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
  annotate(high);
  annotate(low);
}
    173 
// Wire one input of an Int64 phi: connect the operand's low/high vregs to
// the corresponding low/high LPhis (see defineInt64Phi for the layout).
void LIRGeneratorX86::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
                                         LBlock* block, size_t lirIndex) {
  MDefinition* operand = phi->getOperand(inputPosition);
  LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
  LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
  low->setOperand(inputPosition,
                  LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
  high->setOperand(
      inputPosition,
      LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
}
    185 
// Lower a unary Int64 ALU op: the output reuses the input register pair,
// which is why the operand is taken "at start".
void LIRGeneratorX86::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
    MDefinition* input) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
  defineInt64ReuseInput(ins, mir, 0);
}
    192 
// Lower a binary Int64 ALU op: lhs is taken at start and reused as the
// output; rhs may be a register pair or an immediate.
void LIRGeneratorX86::lowerForALUInt64(
    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
  ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
  defineInt64ReuseInput(ins, mir, 0);
}
    200 
// Lower a 64-bit multiply. The general case uses the widening mul, which
// requires the lhs/output in edx:eax; a scratch temp is needed unless the
// rhs constant hits one of the cheap special cases handled by codegen.
void LIRGeneratorX86::lowerForMulInt64(LMulI64* ins, MMul* mir,
                                       MDefinition* lhs, MDefinition* rhs) {
  bool needsTemp = true;

  if (rhs->isConstant()) {
    int64_t constant = rhs->toConstant()->toInt64();
    // Note: shift is only meaningful (and only consulted) when |constant|
    // is positive, per the power-of-two check below.
    int32_t shift = mozilla::FloorLog2(constant);
    // See special cases in CodeGeneratorX86Shared::visitMulI64.
    if (constant >= -1 && constant <= 2) {
      // -1, 0, 1, 2 are handled with neg/xor/mov/add — no temp needed.
      needsTemp = false;
    }
    if (constant > 0 && int64_t(1) << shift == constant) {
      // Positive power of two: lowered as a shift — no temp needed.
      needsTemp = false;
    }
  }

  // MulI64 on x86 needs output to be in edx, eax;
  ins->setLhs(useInt64Fixed(lhs, Register64(edx, eax), /*useAtStart = */ true));
  ins->setRhs(useInt64OrConstant(rhs));
  if (needsTemp) {
    ins->setTemp0(temp());
  }

  defineInt64Fixed(ins, mir,
                   LInt64Allocation(LAllocation(AnyRegister(edx)),
                                    LAllocation(AnyRegister(eax))));
}
    228 
// Lower a 64-bit shift or rotate. Variable counts must be in ecx (the only
// register x86 shift instructions accept as a count); constant counts can be
// encoded as immediates.
template <class LInstr>
void LIRGeneratorX86::lowerForShiftInt64(LInstr* ins, MDefinition* mir,
                                         MDefinition* lhs, MDefinition* rhs) {
  LAllocation rhsAlloc;
  if (rhs->isConstant()) {
    rhsAlloc = useOrConstantAtStart(rhs);
  } else {
    // The operands are int64, but we only care about the lower 32 bits of the
    // RHS. The code below will load that part in ecx and will discard the upper
    // half.
    rhsAlloc = useLowWordFixed(rhs, ecx);
  }

  // Shifts and rotates use differently-named accessors on their LIR nodes;
  // rotates additionally need a scratch temp.
  if constexpr (std::is_same_v<LInstr, LShiftI64>) {
    ins->setLhs(useInt64RegisterAtStart(lhs));
    ins->setRhs(rhsAlloc);
    defineInt64ReuseInput(ins, mir, LShiftI64::LhsIndex);
  } else {
    ins->setInput(useInt64RegisterAtStart(lhs));
    ins->setCount(rhsAlloc);
    ins->setTemp0(temp());
    defineInt64ReuseInput(ins, mir, LRotateI64::InputIndex);
  }
}
    253 
// Explicit instantiations: the template above is shared by plain shifts
// (LShiftI64) and rotates (LRotateI64).
template void LIRGeneratorX86::lowerForShiftInt64(LShiftI64* ins,
                                                  MDefinition* mir,
                                                  MDefinition* lhs,
                                                  MDefinition* rhs);
template void LIRGeneratorX86::lowerForShiftInt64(LRotateI64* ins,
                                                  MDefinition* mir,
                                                  MDefinition* lhs,
                                                  MDefinition* rhs);
    262 
// Lower a typed-array compareExchange. The 64-bit (BigInt) path uses the
// cmpxchg8b register convention: expected old value in edx:eax, replacement
// in ecx:ebx, result produced in edx:eax.
void LIRGenerator::visitCompareExchangeTypedArrayElement(
    MCompareExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LInt64Allocation oldval =
        useInt64FixedAtStart(ins->oldval(), Register64(edx, eax));
    LInt64Allocation newval =
        useInt64Fixed(ins->newval(), Register64(ecx, ebx));

    auto* lir = new (alloc())
        LCompareExchangeTypedArrayElement64(elements, index, oldval, newval);
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  // Narrower element types share the cross-platform lowering; x86 needs the
  // byte-register restriction for 8-bit accesses.
  lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ true);
}
    287 
// Lower a typed-array atomic exchange. The 64-bit (BigInt) path follows the
// cmpxchg8b convention: new value in ecx:ebx, result in edx:eax.
void LIRGenerator::visitAtomicExchangeTypedArrayElement(
    MAtomicExchangeTypedArrayElement* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LInt64Allocation value = useInt64Fixed(ins->value(), Register64(ecx, ebx));

    auto* lir = new (alloc())
        LAtomicExchangeTypedArrayElement64(elements, index, value);
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  // Narrower element types share the cross-platform lowering.
  lowerAtomicExchangeTypedArrayElement(ins, /*useI386ByteRegisters=*/true);
}
    309 
// Lower a typed-array atomic read-modify-write (add/sub/and/or/xor). The
// 64-bit (BigInt) path pins value to ecx:ebx and result/temp to edx:eax,
// matching the cmpxchg8b register convention.
void LIRGenerator::visitAtomicTypedArrayElementBinop(
    MAtomicTypedArrayElementBinop* ins) {
  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);

  if (Scalar::isBigIntType(ins->arrayType())) {
    LUse elements = useRegister(ins->elements());
    LAllocation index =
        useRegisterOrIndexConstant(ins->index(), ins->arrayType());
    LInt64Allocation value = useInt64Fixed(ins->value(), Register64(ecx, ebx));

    // Case 1: the result of the operation is not used.
    if (ins->isForEffect()) {
      // edx:eax are still clobbered by the operation, so reserve them as a
      // fixed temp.
      LInt64Definition temp = tempInt64Fixed(Register64(edx, eax));

      auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
          elements, index, value, temp);
      add(lir, ins);
      return;
    }

    // Case 2: the result of the operation is used.

    auto* lir =
        new (alloc()) LAtomicTypedArrayElementBinop64(elements, index, value);
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  // Narrower element types share the cross-platform lowering.
  lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ true);
}
    343 
// Lower an atomic 64-bit load. On x86-32 this is done with cmpxchg8b, which
// needs ecx:ebx as a scratch pair and produces the loaded value in edx:eax.
void LIRGeneratorX86::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
  const LUse elements = useRegister(ins->elements());
  const LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->storageType());

  auto* lir = new (alloc())
      LAtomicLoad64(elements, index, tempInt64Fixed(Register64(ecx, ebx)));
  defineInt64Fixed(lir, ins,
                   LInt64Allocation(LAllocation(AnyRegister(edx)),
                                    LAllocation(AnyRegister(eax))));
}
    355 
// Lower an atomic 64-bit store: value pinned to ecx:ebx with edx:eax as a
// fixed temp, matching the cmpxchg8b register convention.
void LIRGeneratorX86::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
  LUse elements = useRegister(ins->elements());
  LAllocation index =
      useRegisterOrIndexConstant(ins->index(), ins->writeType());
  LInt64Allocation value = useInt64Fixed(ins->value(), Register64(ecx, ebx));
  LInt64Definition temp = tempInt64Fixed(Register64(edx, eax));

  add(new (alloc()) LAtomicStore64(elements, index, value, temp), ins);
}
    365 
    366 void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
    367  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    368  LWasmUint32ToDouble* lir = new (alloc())
    369      LWasmUint32ToDouble(useRegisterAtStart(ins->input()), temp());
    370  define(lir, ins);
    371 }
    372 
    373 void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
    374  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
    375  LWasmUint32ToFloat32* lir = new (alloc())
    376      LWasmUint32ToFloat32(useRegisterAtStart(ins->input()), temp());
    377  define(lir, ins);
    378 }
    379 
    380 // If the base is a constant, and it is zero or its offset is zero, then
    381 // code generation will fold the values into the access.  Allocate the
    382 // pointer to a register only if that can't happen.
    383 
    384 static bool OptimizableConstantAccess(MDefinition* base,
    385                                      const wasm::MemoryAccessDesc& access) {
    386  MOZ_ASSERT(base->isConstant());
    387  MOZ_ASSERT(base->type() == MIRType::Int32);
    388 
    389  if (!(base->toConstant()->isInt32(0) || access.offset32() == 0)) {
    390    return false;
    391  }
    392  if (access.type() == Scalar::Int64) {
    393    // For int64 accesses on 32-bit systems we will need to add another offset
    394    // of 4 to access the high part of the value; make sure this does not
    395    // overflow the value.
    396    int32_t v;
    397    if (base->toConstant()->isInt32(0)) {
    398      v = access.offset32();
    399    } else {
    400      v = base->toConstant()->toInt32();
    401    }
    402    return v <= int32_t(INT32_MAX - INT64HIGH_OFFSET);
    403  }
    404  return true;
    405 }
    406 
// Lower a wasm memory load. Atomic i64 loads go through cmpxchg8b; other
// loads fold constant base/offset into the access where possible.
void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    // cmpxchg8b convention: ecx:ebx scratch, result in edx:eax.
    auto* lir = new (alloc())
        LWasmAtomicLoadI64(useRegister(base), useRegister(memoryBase),
                           tempInt64Fixed(Register64(ecx, ebx)));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  // Leave baseAlloc bogus when the constant base/offset can be folded into
  // the access itself (see OptimizableConstantAccess).
  LAllocation baseAlloc;
  if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
    baseAlloc = ins->type() == MIRType::Int64 ? useRegister(base)
                                              : useRegisterAtStart(base);
  }

  if (ins->type() != MIRType::Int64) {
    auto* lir =
        new (alloc()) LWasmLoad(baseAlloc, useRegisterAtStart(memoryBase));
    define(lir, ins);
    return;
  }

  // "AtStart" register usage does not work for the 64-bit case because we
  // clobber two registers for the result and may need two registers for a
  // scaled address; we can't guarantee non-interference.

  auto* lir = new (alloc()) LWasmLoadI64(baseAlloc, useRegister(memoryBase));

  Scalar::Type accessType = ins->access().type();
  if (accessType == Scalar::Int8 || accessType == Scalar::Int16 ||
      accessType == Scalar::Int32) {
    // We use cdq to sign-extend the result and cdq demands these registers.
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  defineInt64(lir, ins);
}
    455 
    456 void LIRGenerator::visitWasmStore(MWasmStore* ins) {
    457  MDefinition* base = ins->base();
    458  MOZ_ASSERT(base->type() == MIRType::Int32);
    459 
    460  MDefinition* memoryBase = ins->memoryBase();
    461  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
    462 
    463  if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
    464    auto* lir = new (alloc()) LWasmAtomicStoreI64(
    465        useRegister(base), useInt64Fixed(ins->value(), Register64(ecx, ebx)),
    466        useRegister(memoryBase), tempInt64Fixed(Register64(edx, eax)));
    467    add(lir, ins);
    468    return;
    469  }
    470 
    471  LAllocation baseAlloc;
    472  if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
    473    baseAlloc = useRegisterAtStart(base);
    474  }
    475 
    476  LAllocation valueAlloc;
    477  switch (ins->access().type()) {
    478    case Scalar::Int8:
    479    case Scalar::Uint8:
    480      // See comment for LIRGeneratorX86::useByteOpRegister.
    481      if (ins->value()->type() != MIRType::Int64) {
    482        valueAlloc = useFixed(ins->value(), eax);
    483      } else {
    484        valueAlloc = useLowWordFixed(ins->value(), eax);
    485      }
    486      break;
    487    case Scalar::Int16:
    488    case Scalar::Uint16:
    489    case Scalar::Int32:
    490    case Scalar::Uint32:
    491    case Scalar::Float32:
    492    case Scalar::Float64:
    493      // For now, don't allow constant values. The immediate operand affects
    494      // instruction layout which affects patching.
    495      if (ins->value()->type() != MIRType::Int64) {
    496        valueAlloc = useRegisterAtStart(ins->value());
    497      } else {
    498        valueAlloc = useLowWordRegisterAtStart(ins->value());
    499      }
    500      break;
    501    case Scalar::Simd128:
    502 #ifdef ENABLE_WASM_SIMD
    503      valueAlloc = useRegisterAtStart(ins->value());
    504      break;
    505 #else
    506      MOZ_CRASH("unexpected array type");
    507 #endif
    508    case Scalar::Int64: {
    509      LInt64Allocation valueAlloc = useInt64RegisterAtStart(ins->value());
    510      auto* lir = new (alloc())
    511          LWasmStoreI64(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
    512      add(lir, ins);
    513      return;
    514    }
    515    case Scalar::Uint8Clamped:
    516    case Scalar::BigInt64:
    517    case Scalar::BigUint64:
    518    case Scalar::Float16:
    519    case Scalar::MaxTypedArrayViewType:
    520      MOZ_CRASH("unexpected array type");
    521  }
    522 
    523  auto* lir = new (alloc())
    524      LWasmStore(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
    525  add(lir, ins);
    526 }
    527 
// Lower a wasm heap compareExchange. The 64-bit path pins operands to the
// cmpxchg8b convention (old in edx:eax, new in ecx:ebx, result edx:eax);
// narrower accesses use cmpxchg with the output pinned to eax.
void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmCompareExchangeI64(
        useRegisterAtStart(base),
        useInt64FixedAtStart(ins->oldValue(), Register64(edx, eax)),
        useInt64FixedAtStart(ins->newValue(), Register64(ecx, ebx)),
        useRegisterAtStart(memoryBase));
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);

  bool byteArray = byteSize(ins->access().type()) == 1;

  // Register allocation:
  //
  // The output may not be used, but eax will be clobbered regardless
  // so pin the output to eax.
  //
  // oldval must be in a register.
  //
  // newval must be in a register.  If the source is a byte array
  // then newval must be a register that has a byte size: this must
  // be ebx, ecx, or edx (eax is taken).
  //
  // Bug #1077036 describes some optimization opportunities.

  const LAllocation oldval = useRegister(ins->oldValue());
  const LAllocation newval =
      byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());

  auto* lir = new (alloc()) LWasmCompareExchangeHeap(
      useRegister(base), oldval, newval, useRegister(memoryBase), temp());
  defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}
    572 
// Lower a wasm heap atomic exchange. 64-bit uses the cmpxchg8b convention;
// 8-bit accesses pin the output to eax (byte-addressable), wider ones can
// use any register.
void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64) {
    MDefinition* base = ins->base();
    auto* lir = new (alloc()) LWasmAtomicExchangeI64(
        useRegister(base), useInt64Fixed(ins->value(), Register64(ecx, ebx)),
        useRegister(memoryBase), ins->access());
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  const LAllocation base = useRegister(ins->base());
  const LAllocation value = useRegister(ins->value());

  auto* lir = new (alloc())
      LWasmAtomicExchangeHeap(base, value, useRegister(memoryBase), temp());

  if (byteSize(ins->access().type()) == 1) {
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
  } else {
    define(lir, ins);
  }
}
    600 
// Lower a wasm heap atomic read-modify-write. 64-bit goes through the
// cmpxchg8b convention; otherwise the lowering depends on whether the
// result is used (LOCK-prefixed single instruction vs. XADD/CMPXCHG loop)
// and on the byte-register restriction for 8-bit accesses.
void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
  MDefinition* base = ins->base();
  MOZ_ASSERT(base->type() == MIRType::Int32);

  MDefinition* memoryBase = ins->memoryBase();
  MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);

  if (ins->access().type() == Scalar::Int64) {
    auto* lir = new (alloc()) LWasmAtomicBinopI64(
        useRegister(base), useInt64Fixed(ins->value(), Register64(ecx, ebx)),
        useRegister(memoryBase), ins->access(), ins->operation());
    defineInt64Fixed(lir, ins,
                     LInt64Allocation(LAllocation(AnyRegister(edx)),
                                      LAllocation(AnyRegister(eax))));
    return;
  }

  MOZ_ASSERT(ins->access().type() < Scalar::Float32);

  bool byteArray = byteSize(ins->access().type()) == 1;

  // Case 1: the result of the operation is not used.
  //
  // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
  // LOCK OR, or LOCK XOR.  These can all take an immediate.

  if (!ins->hasUses()) {
    LAllocation value;
    if (byteArray && !ins->value()->isConstant()) {
      // 8-bit access needs a byte-addressable value register; use ebx.
      value = useFixed(ins->value(), ebx);
    } else {
      value = useRegisterOrConstant(ins->value());
    }
    auto* lir = new (alloc()) LWasmAtomicBinopHeapForEffect(
        useRegister(base), value, useRegister(memoryBase), temp());
    add(lir, ins);
    return;
  }

  // Case 2: the result of the operation is used.
  //
  // For ADD and SUB we'll use XADD:
  //
  //    movl       value, output
  //    lock xaddl output, mem
  //
  // For the 8-bit variants XADD needs a byte register for the
  // output only, we can still set up with movl; just pin the output
  // to eax (or ebx / ecx / edx).
  //
  // For AND/OR/XOR we need to use a CMPXCHG loop:
  //
  //    movl          *mem, eax
  // L: mov           eax, temp
  //    andl          value, temp
  //    lock cmpxchg  temp, mem  ; reads eax also
  //    jnz           L
  //    ; result in eax
  //
  // Note the placement of L, cmpxchg will update eax with *mem if
  // *mem does not have the expected value, so reloading it at the
  // top of the loop would be redundant.
  //
  // We want to fix eax as the output.  We also need a temp for
  // the intermediate value.
  //
  // For the 8-bit variants the temp must have a byte register.
  //
  // There are optimization opportunities:
  //  - better 8-bit register allocation and instruction selection, Bug
  //  #1077036.

  // bitOp == true selects the CMPXCHG loop (AND/OR/XOR); false means the
  // XADD path (ADD/SUB).
  bool bitOp =
      !(ins->operation() == AtomicOp::Add || ins->operation() == AtomicOp::Sub);
  LDefinition tempDef = LDefinition::BogusTemp();
  LAllocation value;

  if (byteArray) {
    value = useFixed(ins->value(), ebx);
    if (bitOp) {
      tempDef = tempFixed(ecx);
    }
  } else if (bitOp || ins->value()->isConstant()) {
    value = useRegisterOrConstant(ins->value());
    if (bitOp) {
      tempDef = temp();
    }
  } else {
    value = useRegisterAtStart(ins->value());
  }

  auto* lir = new (alloc()) LWasmAtomicBinopHeap(
      useRegister(base), value, useRegister(memoryBase), tempDef, temp());
  if (byteArray || bitOp) {
    defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
  } else if (ins->value()->isConstant()) {
    define(lir, ins);
  } else {
    defineReuseInput(lir, ins, LWasmAtomicBinopHeap::ValueIndex);
  }
}
    702 
    703 void LIRGeneratorX86::lowerDivI64(MDiv* div) {
    704  MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
    705 }
    706 
// Lower the i64 division builtin call: lhs in eax:ebx pair and rhs in
// ecx:edx pair (the builtin's fixed argument registers), with the instance
// pointer in InstanceReg; the result comes back via the return registers.
void LIRGeneratorX86::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
  MOZ_ASSERT(div->lhs()->type() == div->rhs()->type());
  MOZ_ASSERT(IsNumberType(div->type()));

  MOZ_ASSERT(div->type() == MIRType::Int64);

  if (div->isUnsigned()) {
    LUDivOrModI64* lir = new (alloc())
        LUDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
                      useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
                      useFixedAtStart(div->instance(), InstanceReg));
    defineReturn(lir, div);
    return;
  }

  LDivOrModI64* lir = new (alloc())
      LDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
                   useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
                   useFixedAtStart(div->instance(), InstanceReg));
  defineReturn(lir, div);
}
    728 
// Plain MMod is never lowered to i64 modulo on x86; it is replaced by the
// MWasmBuiltinModI64 instance call before lowering.
void LIRGeneratorX86::lowerModI64(MMod* mod) {
  MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
}
    732 
    733 void LIRGeneratorX86::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
    734  MDefinition* lhs = mod->lhs();
    735  MDefinition* rhs = mod->rhs();
    736  MOZ_ASSERT(lhs->type() == rhs->type());
    737  MOZ_ASSERT(IsNumberType(mod->type()));
    738 
    739  MOZ_ASSERT(mod->type() == MIRType::Int64);
    740  MOZ_ASSERT(mod->type() == MIRType::Int64);
    741 
    742  if (mod->isUnsigned()) {
    743    LUDivOrModI64* lir = new (alloc())
    744        LUDivOrModI64(useInt64FixedAtStart(lhs, Register64(eax, ebx)),
    745                      useInt64FixedAtStart(rhs, Register64(ecx, edx)),
    746                      useFixedAtStart(mod->instance(), InstanceReg));
    747    defineReturn(lir, mod);
    748    return;
    749  }
    750 
    751  LDivOrModI64* lir = new (alloc())
    752      LDivOrModI64(useInt64FixedAtStart(lhs, Register64(eax, ebx)),
    753                   useInt64FixedAtStart(rhs, Register64(ecx, edx)),
    754                   useFixedAtStart(mod->instance(), InstanceReg));
    755  defineReturn(lir, mod);
    756 }
    757 
// Unsigned 64-bit division also goes through the builtin call-out path
// (handled by lowerWasmBuiltinDivI64's isUnsigned() branch).
void LIRGeneratorX86::lowerUDivI64(MDiv* div) {
 MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
}
    761 
// Unsigned 64-bit modulo also goes through the builtin call-out path
// (handled by lowerWasmBuiltinModI64's isUnsigned() branch).
void LIRGeneratorX86::lowerUModI64(MMod* mod) {
 MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
}
    765 
// Lower pointer-sized BigInt division. x86 division produces the quotient
// in eax and clobbers edx with the remainder, so the output is fixed to eax
// and edx is reserved as a temp. A snapshot is always assigned because the
// operation can bail out.
void LIRGeneratorX86::lowerBigIntPtrDiv(MBigIntPtrDiv* ins) {
 auto* lir = new (alloc())
     LBigIntPtrDiv(useRegister(ins->lhs()), useRegister(ins->rhs()),
                   tempFixed(edx), LDefinition::BogusTemp());
 assignSnapshot(lir, ins->bailoutKind());
 defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
}
    773 
// Lower pointer-sized BigInt modulo. x86 division produces the remainder
// in edx and clobbers eax with the quotient, so the output is fixed to edx
// and eax is reserved as a temp. A snapshot is only needed when a
// divide-by-zero bailout is possible.
void LIRGeneratorX86::lowerBigIntPtrMod(MBigIntPtrMod* ins) {
 auto* lir = new (alloc())
     LBigIntPtrMod(useRegister(ins->lhs()), useRegister(ins->rhs()),
                   tempFixed(eax), LDefinition::BogusTemp());
 if (ins->canBeDivideByZero()) {
   assignSnapshot(lir, ins->bailoutKind());
 }
 defineFixed(lir, ins, LAllocation(AnyRegister(edx)));
}
    783 
    784 void LIRGeneratorX86::lowerTruncateDToInt32(MTruncateToInt32* ins) {
    785  MDefinition* opd = ins->input();
    786  MOZ_ASSERT(opd->type() == MIRType::Double);
    787 
    788  LDefinition maybeTemp =
    789      Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempDouble();
    790  define(new (alloc()) LTruncateDToInt32(useRegister(opd), maybeTemp), ins);
    791 }
    792 
    793 void LIRGeneratorX86::lowerTruncateFToInt32(MTruncateToInt32* ins) {
    794  MDefinition* opd = ins->input();
    795  MOZ_ASSERT(opd->type() == MIRType::Float32);
    796 
    797  LDefinition maybeTemp =
    798      Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempFloat32();
    799  define(new (alloc()) LTruncateFToInt32(useRegister(opd), maybeTemp), ins);
    800 }
    801 
    802 void LIRGenerator::visitSubstr(MSubstr* ins) {
    803  // Due to lack of registers on x86, we reuse the string register as
    804  // temporary. As a result we only need two temporary registers and take a
    805  // bogus temporary as fifth argument.
    806  LSubstr* lir = new (alloc())
    807      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
    808              useRegister(ins->length()), temp(), LDefinition::BogusTemp(),
    809              tempByteOpRegister());
    810  define(lir, ins);
    811  assignSafepoint(lir, ins);
    812 }
    813 
    814 void LIRGeneratorX86::lowerWasmBuiltinTruncateToInt32(
    815    MWasmBuiltinTruncateToInt32* ins) {
    816  MDefinition* opd = ins->input();
    817  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
    818 
    819  LDefinition maybeTemp =
    820      Assembler::HasSSE3() ? LDefinition::BogusTemp() : tempDouble();
    821  if (opd->type() == MIRType::Double) {
    822    define(new (alloc()) LWasmBuiltinTruncateDToInt32(
    823               useRegister(opd), useFixed(ins->instance(), InstanceReg),
    824               maybeTemp),
    825           ins);
    826    return;
    827  }
    828 
    829  define(
    830      new (alloc()) LWasmBuiltinTruncateFToInt32(
    831          useRegister(opd), useFixed(ins->instance(), InstanceReg), maybeTemp),
    832      ins);
    833 }
    834 
    835 void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
    836  MDefinition* opd = ins->input();
    837  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
    838 
    839  LDefinition temp = tempDouble();
    840  defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd), temp), ins);
    841 }
    842 
// x86 lowers float -> int64 truncation inline (visitWasmTruncateToInt64
// above), so the builtin call-out variant is never used here.
void LIRGeneratorX86::lowerWasmBuiltinTruncateToInt64(
   MWasmBuiltinTruncateToInt64* ins) {
 MOZ_CRASH("We don't use it for this architecture");
}
    847 
    848 void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
    849  MDefinition* opd = ins->input();
    850  MOZ_ASSERT(opd->type() == MIRType::Int64);
    851  MOZ_ASSERT(IsFloatingPointType(ins->type()));
    852 
    853  LDefinition maybeTemp = ins->isUnsigned() && ins->type() == MIRType::Float32
    854                              ? temp()
    855                              : LDefinition::BogusTemp();
    856 
    857  define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp),
    858         ins);
    859 }
    860 
// x86 lowers int64 -> floating-point inline (visitInt64ToFloatingPoint
// above), so the builtin call-out variant is never used here.
void LIRGeneratorX86::lowerBuiltinInt64ToFloatingPoint(
   MBuiltinInt64ToFloatingPoint* ins) {
 MOZ_CRASH("We don't use it for this architecture");
}
    865 
// Lower an int32 -> int64 extension. The unsigned case just zero-extends
// and can use any register; the signed case pins the input to eax and the
// output to edx:eax, presumably so the code generator can use cdq (as in
// visitSignExtendInt64 below) -- the fixed registers make that possible.
void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
 if (ins->isUnsigned()) {
   defineInt64(new (alloc())
                   LExtendInt32ToInt64(useRegisterAtStart(ins->input())),
               ins);
 } else {
   LExtendInt32ToInt64* lir =
       new (alloc()) LExtendInt32ToInt64(useFixedAtStart(ins->input(), eax));
   defineInt64Fixed(lir, ins,
                    LInt64Allocation(LAllocation(AnyRegister(edx)),
                                     LAllocation(AnyRegister(eax))));
 }
}
    879 
// Lower an int64 sign-extension (wasm sign-extend operators).
void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
 // Here we'll end up using cdq which requires input and output in (edx,eax).
 LSignExtendInt64* lir = new (alloc()) LSignExtendInt64(
     useInt64FixedAtStart(ins->input(), Register64(edx, eax)));
 defineInt64Fixed(lir, ins,
                  LInt64Allocation(LAllocation(AnyRegister(edx)),
                                   LAllocation(AnyRegister(eax))));
}
    888 
    889 // On x86 we specialize the only cases where compare is {U,}Int32 and select
    890 // is {U,}Int32.
    891 bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
    892    MCompare::CompareType compTy, MIRType insTy) {
    893  return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
    894                                     compTy == MCompare::Compare_UInt32);
    895 }
    896 
// Lower a fused wasm compare+select. The true-expression is used
// at-start in a register and the result reuses that same input
// (defineReuseInput with IfTrueExprIndex), which keeps register pressure
// down on x86; the compare operands and false-expression may be any
// allocation.
void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
                                                  MDefinition* lhs,
                                                  MDefinition* rhs,
                                                  MCompare::CompareType compTy,
                                                  JSOp jsop) {
 MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
 auto* lir = new (alloc()) LWasmCompareAndSelect(
     useRegister(lhs), useAny(rhs), useRegisterAtStart(ins->trueExpr()),
     useAny(ins->falseExpr()), compTy, jsop);
 defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
}