tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

CodeGenerator-x86.cpp (41496B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/x86/CodeGenerator-x86.h"
      8 
      9 #include "mozilla/DebugOnly.h"
     10 
     11 #include "jsnum.h"
     12 
     13 #include "jit/CodeGenerator.h"
     14 #include "jit/MIR-wasm.h"
     15 #include "jit/MIR.h"
     16 #include "jit/MIRGraph.h"
     17 #include "js/Conversions.h"
     18 #include "vm/Shape.h"
     19 #include "wasm/WasmBuiltins.h"
     20 #include "wasm/WasmCodegenTypes.h"
     21 #include "wasm/WasmInstanceData.h"
     22 
     23 #include "jit/MacroAssembler-inl.h"
     24 #include "jit/shared/CodeGenerator-shared-inl.h"
     25 #include "vm/JSScript-inl.h"
     26 
     27 using namespace js;
     28 using namespace js::jit;
     29 
     30 using JS::GenericNaN;
     31 using mozilla::DebugOnly;
     32 using mozilla::FloatingPoint;
     33 
// Construct the x86-specific code generator; all state lives in the shared
// x86 base class, so this simply forwards the arguments.
CodeGeneratorX86::CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph,
                                   MacroAssembler* masm,
                                   const wasm::CodeMetadata* wasmCodeMeta)
    : CodeGeneratorX86Shared(gen, graph, masm, wasmCodeMeta) {}
     38 
     39 void CodeGenerator::visitBox(LBox* box) {
     40  const LDefinition* type = box->getDef(TYPE_INDEX);
     41 
     42  MOZ_ASSERT(!box->payload()->isConstant());
     43 
     44  // On x86, the input operand and the output payload have the same
     45  // virtual register. All that needs to be written is the type tag for
     46  // the type definition.
     47  masm.mov(ImmWord(MIRTypeToTag(box->type())), ToRegister(type));
     48 }
     49 
// Box a floating-point value into a ValueOperand (type + payload register
// pair). Optionally applies Spectre value masking to the type word.
void CodeGenerator::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
  const AnyRegister in = ToAnyRegister(box->input());
  const ValueOperand out = ToOutValue(box);

  masm.moveValue(TypedOrValueRegister(box->type(), in), out);

  if (JitOptions.spectreValueMasking) {
    // Clamp the type word to JSVAL_TAG_CLEAR when it is below it, so a
    // misspeculated type cannot be combined with an attacker-controlled
    // payload.
    Register scratch = ToRegister(box->temp1());
    masm.move32(Imm32(JSVAL_TAG_CLEAR), scratch);
    masm.cmp32Move32(Assembler::Below, scratch, out.typeReg(), scratch,
                     out.typeReg());
  }
}
     63 
// Unbox a Value into its raw payload. Fallible unboxes bail out on a tag
// mismatch; infallible ones only assert the tag in debug builds.
void CodeGenerator::visitUnbox(LUnbox* unbox) {
  // Note that for unbox, the type and payload indexes are switched on the
  // inputs.
  Operand type = ToOperand(unbox->type());
  Operand payload = ToOperand(unbox->payload());
  Register output = ToRegister(unbox->output());
  MUnbox* mir = unbox->mir();

  JSValueTag tag = MIRTypeToTag(mir->type());
  if (mir->fallible()) {
    // Wrong tag => deoptimize via the snapshot.
    masm.cmp32(type, Imm32(tag));
    bailoutIf(Assembler::NotEqual, unbox->snapshot());
  } else {
#ifdef DEBUG
    // Infallible unboxes must always see the expected tag.
    Label ok;
    masm.branch32(Assembler::Equal, type, Imm32(tag), &ok);
    masm.assumeUnreachable("Infallible unbox type mismatch");
    masm.bind(&ok);
#endif
  }

  // Note: If spectreValueMasking is disabled, then this instruction will
  // default to a no-op as long as the lowering allocate the same register for
  // the output and the payload.
  masm.unboxNonDouble(type, payload, output, ValueTypeFromMIRType(mir->type()));
}
     90 
// Atomically load a 64-bit typed-array element. The fixed register pairs
// (ecx:ebx as temp, edx:eax as output) are what the underlying 64-bit
// atomic sequence on x86 requires (see lock_cmpxchg8b uses below).
void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
  Register elements = ToRegister(lir->elements());

  MOZ_ASSERT(ToOutRegister64(lir) == Register64(edx, eax));
  MOZ_ASSERT(ToRegister64(lir->temp0()) == Register64(ecx, ebx));

  Scalar::Type storageType = lir->mir()->storageType();

  // The index may be constant or a register; ToAddressOrBaseIndex returns a
  // variant covering both cases.
  auto source = ToAddressOrBaseIndex(elements, lir->index(), storageType);

  source.match([&](const auto& source) {
    masm.atomicLoad64(Synchronization::Load(), source, Register64(ecx, ebx),
                      Register64(edx, eax));
  });
}
    106 
// Atomically store a 64-bit typed-array element. Register pairs are pinned
// (value in ecx:ebx, temp in edx:eax) to match the x86 64-bit atomic
// sequence's requirements.
void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
  Register elements = ToRegister(lir->elements());
  Register64 value = ToRegister64(lir->value());
  Register64 temp = ToRegister64(lir->temp0());

  MOZ_ASSERT(value == Register64(ecx, ebx));
  MOZ_ASSERT(temp == Register64(edx, eax));

  Scalar::Type writeType = lir->mir()->writeType();

  // Constant or register index; match on the resulting address form.
  auto dest = ToAddressOrBaseIndex(elements, lir->index(), writeType);

  dest.match([&](const auto& dest) {
    masm.atomicStore64(Synchronization::Store(), dest, value, temp);
  });
}
    123 
// 64-bit compare-exchange on a typed-array element. oldval/output must be
// edx:eax and newval ecx:ebx, the register pairs the x86 8-byte
// compare-exchange sequence operates on.
void CodeGenerator::visitCompareExchangeTypedArrayElement64(
    LCompareExchangeTypedArrayElement64* lir) {
  Register elements = ToRegister(lir->elements());
  Register64 oldval = ToRegister64(lir->oldval());
  Register64 newval = ToRegister64(lir->newval());
  Register64 out = ToOutRegister64(lir);

  MOZ_ASSERT(oldval == Register64(edx, eax));
  MOZ_ASSERT(newval == Register64(ecx, ebx));
  // The old value is overwritten in place with the observed value.
  MOZ_ASSERT(out == oldval);

  Scalar::Type arrayType = lir->mir()->arrayType();

  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  dest.match([&](const auto& dest) {
    masm.compareExchange64(Synchronization::Full(), dest, oldval, newval, out);
  });
}
    143 
// 64-bit atomic exchange on a typed-array element. The value lives in
// ecx:ebx and the result is produced in edx:eax, matching the x86 8-byte
// atomic sequence's fixed registers.
void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
    LAtomicExchangeTypedArrayElement64* lir) {
  Register elements = ToRegister(lir->elements());
  Register64 value = ToRegister64(lir->value());
  Register64 out = ToOutRegister64(lir);

  MOZ_ASSERT(value == Register64(ecx, ebx));
  MOZ_ASSERT(out == Register64(edx, eax));

  Scalar::Type arrayType = lir->mir()->arrayType();

  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  dest.match([&](const auto& dest) {
    masm.atomicExchange64(Synchronization::Full(), dest, value, out);
  });
}
    161 
// 64-bit atomic read-modify-write (add/and/or/xor/...) on a typed-array
// element, producing the previous value. x86-32 is register-starved here:
// the operand value is spilled to the stack so its registers can serve as
// temporaries during the atomic loop, then restored afterwards.
void CodeGenerator::visitAtomicTypedArrayElementBinop64(
    LAtomicTypedArrayElementBinop64* lir) {
  MOZ_ASSERT(!lir->mir()->isForEffect());

  Register elements = ToRegister(lir->elements());
  Register64 value = ToRegister64(lir->value());
  Register64 out = ToOutRegister64(lir);

  MOZ_ASSERT(value == Register64(ecx, ebx));
  MOZ_ASSERT(out == Register64(edx, eax));

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  // Save |value| before it's clobbered below.
  masm.push64(value);

  // The saved operand is read back from the top of the stack.
  Address addr(masm.getStackPointer(), 0);

  dest.match([&](const auto& dest) {
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, value,
                         out);
  });

  masm.pop64(value);
}
    190 
// 64-bit atomic read-modify-write on a typed-array element whose result is
// discarded. Same stack-spill scheme as the value-producing variant above,
// but the previous value goes into a temp instead of an output.
void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
    LAtomicTypedArrayElementBinopForEffect64* lir) {
  MOZ_ASSERT(lir->mir()->isForEffect());

  Register elements = ToRegister(lir->elements());
  Register64 value = ToRegister64(lir->value());
  Register64 temp = ToRegister64(lir->temp0());

  MOZ_ASSERT(value == Register64(ecx, ebx));
  MOZ_ASSERT(temp == Register64(edx, eax));

  Scalar::Type arrayType = lir->mir()->arrayType();
  AtomicOp atomicOp = lir->mir()->operation();

  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);

  // Save |value| before it's clobbered below.
  masm.push64(value);

  // The saved operand is read back from the top of the stack.
  Address addr(masm.getStackPointer(), 0);

  dest.match([&](const auto& dest) {
    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, value,
                         temp);
  });

  masm.pop64(value);
}
    219 
    220 void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
    221  Register input = ToRegister(lir->input());
    222  Register temp = ToRegister(lir->temp0());
    223 
    224  if (input != temp) {
    225    masm.mov(input, temp);
    226  }
    227 
    228  // Beware: convertUInt32ToDouble clobbers input.
    229  masm.convertUInt32ToDouble(temp, ToFloatRegister(lir->output()));
    230 }
    231 
    232 void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
    233  Register input = ToRegister(lir->input());
    234  Register temp = ToRegister(lir->temp0());
    235  FloatRegister output = ToFloatRegister(lir->output());
    236 
    237  if (input != temp) {
    238    masm.mov(input, temp);
    239  }
    240 
    241  // Beware: convertUInt32ToFloat32 clobbers input.
    242  masm.convertUInt32ToFloat32(temp, output);
    243 }
    244 
// Shared emitter for wasm loads (both 32-bit values and i64 results).
// Computes the effective address from memoryBase/ptr/offset and dispatches
// on the MIR result type.
template <typename T>
void CodeGeneratorX86::emitWasmLoad(T* ins) {
  const MWasmLoad* mir = ins->mir();

  mir->access().assertOffsetInGuardPages();
  uint32_t offset = mir->access().offset32();

  const LAllocation* ptr = ins->ptr();
  const LAllocation* memoryBase = ins->memoryBase();

  // Lowering has set things up so that we can use a BaseIndex form if the
  // pointer is constant and the offset is zero, or if the pointer is zero.

  Operand srcAddr =
      ptr->isBogus()
          ? Operand(ToRegister(memoryBase),
                    offset ? offset : mir->base()->toConstant()->toInt32())
          : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  if (mir->type() == MIRType::Int64) {
    // Atomic i64 accesses are handled elsewhere; only non-atomic (or
    // narrower) accesses may reach the plain i64 load path.
    MOZ_ASSERT_IF(mir->access().isAtomic(),
                  mir->access().type() != Scalar::Int64);
    masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
  } else {
    masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
  }
}
    272 
    273 void CodeGenerator::visitWasmLoad(LWasmLoad* ins) { emitWasmLoad(ins); }
    274 
    275 void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* ins) { emitWasmLoad(ins); }
    276 
// Shared emitter for wasm stores (both 32-bit values and i64 values).
// Computes the effective address from memoryBase/ptr/offset and dispatches
// on the LIR node type at compile time.
template <typename T>
void CodeGeneratorX86::emitWasmStore(T* ins) {
  const MWasmStore* mir = ins->mir();

  mir->access().assertOffsetInGuardPages();
  uint32_t offset = mir->access().offset32();

  const LAllocation* ptr = ins->ptr();
  const LAllocation* memoryBase = ins->memoryBase();

  // Lowering has set things up so that we can use a BaseIndex form if the
  // pointer is constant and the offset is zero, or if the pointer is zero.

  Operand dstAddr =
      ptr->isBogus()
          ? Operand(ToRegister(memoryBase),
                    offset ? offset : mir->base()->toConstant()->toInt32())
          : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  if constexpr (std::is_same_v<T, LWasmStoreI64>) {
    Register64 value = ToRegister64(ins->value());
    masm.wasmStoreI64(mir->access(), value, dstAddr);
  } else {
    AnyRegister value = ToAnyRegister(ins->value());
    masm.wasmStore(mir->access(), value, dstAddr);
  }
}
    304 
    305 void CodeGenerator::visitWasmStore(LWasmStore* ins) { emitWasmStore(ins); }
    306 
// Thin wrapper: i64 stores share the templated emitWasmStore implementation.
void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* ins) {
  emitWasmStore(ins);
}
    310 
// Wasm heap compare-exchange (<= 32 bits). The effective address is
// materialized once into addrTemp with LEA so the atomic sequence can use
// a simple zero-offset address.
void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MWasmCompareExchangeHeap* mir = ins->mir();

  Register ptrReg = ToRegister(ins->ptr());
  Register oldval = ToRegister(ins->oldValue());
  Register newval = ToRegister(ins->newValue());
  Register addrTemp = ToRegister(ins->temp0());
  Register memoryBase = ToRegister(ins->memoryBase());
  Register output = ToRegister(ins->output());

  // addrTemp := memoryBase + ptr + offset.
  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset32()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  masm.wasmCompareExchange(mir->access(), memAddr, oldval, newval, output);
}
    328 
// Wasm heap atomic exchange (<= 32 bits). Address is precomputed into
// addrTemp with LEA, as in the compare-exchange case above.
void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
  MWasmAtomicExchangeHeap* mir = ins->mir();

  Register ptrReg = ToRegister(ins->ptr());
  Register value = ToRegister(ins->value());
  Register addrTemp = ToRegister(ins->temp0());
  Register memoryBase = ToRegister(ins->memoryBase());
  Register output = ToRegister(ins->output());

  // addrTemp := memoryBase + ptr + offset.
  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset32()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  masm.wasmAtomicExchange(mir->access(), memAddr, value, output);
}
    344 
// Wasm heap atomic read-modify-write (<= 32 bits) producing the previous
// value. Constant operands are passed as immediates to allow tighter
// encodings; the address is precomputed into addrTemp via LEA.
void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();

  Register ptrReg = ToRegister(ins->ptr());
  // temp may be invalid when the operation doesn't need one.
  Register temp = ToTempRegisterOrInvalid(ins->temp0());
  Register addrTemp = ToRegister(ins->temp1());
  Register out = ToRegister(ins->output());
  const LAllocation* value = ins->value();
  AtomicOp op = mir->operation();
  Register memoryBase = ToRegister(ins->memoryBase());

  // addrTemp := memoryBase + ptr + offset.
  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset32()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  if (value->isConstant()) {
    masm.wasmAtomicFetchOp(mir->access(), op, Imm32(ToInt32(value)), memAddr,
                           temp, out);
  } else {
    masm.wasmAtomicFetchOp(mir->access(), op, ToRegister(value), memAddr, temp,
                           out);
  }
}
    368 
// Wasm heap atomic read-modify-write (<= 32 bits) whose result is unused,
// allowing the cheaper effect-only atomic ops (no output register needed).
void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  MWasmAtomicBinopHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasUses());

  Register ptrReg = ToRegister(ins->ptr());
  Register addrTemp = ToRegister(ins->temp0());
  const LAllocation* value = ins->value();
  AtomicOp op = mir->operation();
  Register memoryBase = ToRegister(ins->memoryBase());

  // addrTemp := memoryBase + ptr + offset.
  masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset32()),
            addrTemp);

  Address memAddr(addrTemp, 0);
  if (value->isConstant()) {
    masm.wasmAtomicEffectOp(mir->access(), op, Imm32(ToInt32(value)), memAddr,
                            InvalidReg);
  } else {
    masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), memAddr,
                            InvalidReg);
  }
}
    392 
// Atomic i64 load from wasm memory. Registers are pinned (temp ecx:ebx,
// output edx:eax) as required by the x86 8-byte atomic sequence.
void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* ins) {
  ins->mir()->access().assertOffsetInGuardPages();
  uint32_t offset = ins->mir()->access().offset32();

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  MOZ_ASSERT(ToRegister64(ins->temp0()) == Register64(ecx, ebx));
  MOZ_ASSERT(ToOutRegister64(ins) == Register64(edx, eax));

  masm.wasmAtomicLoad64(ins->mir()->access(), srcAddr, Register64(ecx, ebx),
                        Register64(edx, eax));
}
    407 
// Atomic i64 compare-exchange on wasm memory via LOCK CMPXCHG8B, which
// hard-codes expected/output in edx:eax and replacement in ecx:ebx. The
// instruction is registered as a potential trap site (OOB faults).
void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* ins) {
  ins->mir()->access().assertOffsetInGuardPages();
  uint32_t offset = ins->mir()->access().offset32();

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  MOZ_ASSERT(ToRegister64(ins->expected()).low == eax);
  MOZ_ASSERT(ToRegister64(ins->expected()).high == edx);
  MOZ_ASSERT(ToRegister64(ins->replacement()).low == ebx);
  MOZ_ASSERT(ToRegister64(ins->replacement()).high == ecx);
  MOZ_ASSERT(ToOutRegister64(ins).low == eax);
  MOZ_ASSERT(ToOutRegister64(ins).high == edx);

  // Record the faulting instruction offset so an OOB access traps cleanly.
  masm.append(ins->mir()->access(), wasm::TrapMachineInsn::Atomic,
              FaultingCodeOffset(masm.currentOffset()));
  masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
}
    427 
// Shared emitter for atomic i64 store and exchange: a CMPXCHG8B loop that
// retries until the stored comparand matches, i.e. until the swap of the
// ecx:ebx value succeeds. On exit edx:eax holds the previous memory value.
template <typename T>
void CodeGeneratorX86::emitWasmStoreOrExchangeAtomicI64(
    T* ins, const wasm::MemoryAccessDesc& access) {
  access.assertOffsetInGuardPages();
  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();
  Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne,
                  access.offset32());

  MOZ_ASSERT(ToRegister64(ins->value()) == Register64(ecx, ebx));

  // eax and edx will be overwritten every time through the loop but
  // memoryBase and ptr must remain live for a possible second iteration.

  MOZ_ASSERT(ToRegister(memoryBase) != edx && ToRegister(memoryBase) != eax);
  MOZ_ASSERT(ToRegister(ptr) != edx && ToRegister(ptr) != eax);

  Label again;
  masm.bind(&again);
  // Each iteration's CMPXCHG8B may fault on an OOB address; record it.
  masm.append(access, wasm::TrapMachineInsn::Atomic,
              FaultingCodeOffset(masm.currentOffset()));
  masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
  // ZF clear => the comparand didn't match; retry with the updated edx:eax.
  masm.j(Assembler::Condition::NonZero, &again);
}
    452 
// Atomic i64 store: implemented as an exchange whose previous value
// (edx:eax) is discarded into a temp.
void CodeGenerator::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* ins) {
  MOZ_ASSERT(ToRegister64(ins->temp0()) == Register64(edx, eax));

  emitWasmStoreOrExchangeAtomicI64(ins, ins->mir()->access());
}
    458 
// Atomic i64 exchange: the CMPXCHG8B loop leaves the previous memory value
// in edx:eax, which is this instruction's output.
void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* ins) {
  MOZ_ASSERT(ToOutRegister64(ins) == Register64(edx, eax));

  emitWasmStoreOrExchangeAtomicI64(ins, ins->access());
}
    464 
// Atomic i64 read-modify-write on wasm memory. All of eax/ebx/ecx/edx are
// consumed by the 8-byte atomic sequence, so the operand value (ecx:ebx)
// is spilled to the stack and its registers reused as temps; memoryBase
// and ptr are confined to esi/edi.
void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* ins) {
  ins->access().assertOffsetInGuardPages();
  uint32_t offset = ins->access().offset32();

  const LAllocation* memoryBase = ins->memoryBase();
  const LAllocation* ptr = ins->ptr();

  BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);

  MOZ_ASSERT(ToRegister(memoryBase) == esi || ToRegister(memoryBase) == edi);
  MOZ_ASSERT(ToRegister(ptr) == esi || ToRegister(ptr) == edi);

  Register64 value = ToRegister64(ins->value());

  MOZ_ASSERT(value.low == ebx);
  MOZ_ASSERT(value.high == ecx);

  Register64 output = ToOutRegister64(ins);

  MOZ_ASSERT(output.low == eax);
  MOZ_ASSERT(output.high == edx);

  // Spill the operand (high word first) so valueAddr sees the low word at
  // offset 0.
  masm.Push(ecx);
  masm.Push(ebx);

  Address valueAddr(esp, 0);

  // Here the `value` register acts as a temp, we'll restore it below.
  masm.wasmAtomicFetchOp64(ins->access(), ins->operation(), valueAddr, srcAddr,
                           value, output);

  masm.Pop(ebx);
  masm.Pop(ecx);
}
    499 
    500 namespace js {
    501 namespace jit {
    502 
// Out-of-line slow path for double->int32 truncation, taken when the
// inline fast path fails (e.g. NaN or out-of-range input). Wraps either a
// plain LTruncateDToInt32 or its wasm-builtin variant.
class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86> {
  LInstruction* ins_;

 public:
  explicit OutOfLineTruncate(LInstruction* ins) : ins_(ins) {
    MOZ_ASSERT(ins_->isTruncateDToInt32() ||
               ins_->isWasmBuiltinTruncateDToInt32());
  }

  void accept(CodeGeneratorX86* codegen) override {
    codegen->visitOutOfLineTruncate(this);
  }

  // Accessors forwarding to the wrapped instruction's operands/defs/temps.
  LAllocation* input() { return ins_->getOperand(0); }
  LDefinition* output() { return ins_->getDef(0); }
  LDefinition* tempFloat() { return ins_->getTemp(0); }

  // Trap site used when the slow path performs a wasm ABI call.
  const wasm::TrapSiteDesc& trapSiteDesc() const {
    if (ins_->isTruncateDToInt32()) {
      return ins_->toTruncateDToInt32()->mir()->trapSiteDesc();
    }

    return ins_->toWasmBuiltinTruncateDToInt32()->mir()->trapSiteDesc();
  }
};
    528 
    529 class OutOfLineTruncateFloat32 : public OutOfLineCodeBase<CodeGeneratorX86> {
    530  LInstruction* ins_;
    531 
    532 public:
    533  explicit OutOfLineTruncateFloat32(LInstruction* ins) : ins_(ins) {
    534    MOZ_ASSERT(ins_->isTruncateFToInt32() ||
    535               ins_->isWasmBuiltinTruncateFToInt32());
    536  }
    537 
    538  void accept(CodeGeneratorX86* codegen) override {
    539    codegen->visitOutOfLineTruncateFloat32(this);
    540  }
    541 
    542  LAllocation* input() { return ins_->getOperand(0); }
    543  LDefinition* output() { return ins_->getDef(0); }
    544  LDefinition* tempFloat() { return ins_->getTemp(0); }
    545 
    546  const wasm::TrapSiteDesc& trapSiteDesc() const {
    547    if (ins_->isTruncateFToInt32()) {
    548      return ins_->toTruncateDToInt32()->mir()->trapSiteDesc();
    549    }
    550 
    551    return ins_->toWasmBuiltinTruncateFToInt32()->mir()->trapSiteDesc();
    552  }
    553 };
    554 
    555 }  // namespace jit
    556 }  // namespace js
    557 
// Truncate a double to int32: fast inline path with an out-of-line
// fallback for inputs the inline sequence cannot handle.
void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  OutOfLineTruncate* ool = new (alloc()) OutOfLineTruncate(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}
    568 
// Wasm-builtin variant of double->int32 truncation; shares the same
// inline fast path and out-of-line fallback as the JS version.
void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
    LWasmBuiltinTruncateDToInt32* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());

  OutOfLineTruncate* ool = new (alloc()) OutOfLineTruncate(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}
    580 
// Truncate a float32 to int32: fast inline path with an out-of-line
// fallback for inputs the inline sequence cannot handle.
void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  OutOfLineTruncateFloat32* ool = new (alloc()) OutOfLineTruncateFloat32(ins);
  addOutOfLineCode(ool, ins->mir());

  masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}
    591 
// Wasm-builtin variant of float32->int32 truncation; shares the same
// inline fast path and out-of-line fallback as the JS version.
void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
    LWasmBuiltinTruncateFToInt32* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());

  OutOfLineTruncateFloat32* ool = new (alloc()) OutOfLineTruncateFloat32(lir);
  addOutOfLineCode(ool, lir->mir());

  masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
  masm.bind(ool->rejoin());
}
    603 
// Slow path for double->int32 truncation. Strategy, in order:
//  1. With SSE3: spill the double, range-check it, and use a 64-bit
//     truncation, keeping the low 32 bits (modulo-2^32 semantics).
//  2. Without SSE3: shift the value into int32 range by adding/subtracting
//     2^32 and truncate, verifying the conversion was exact.
//  3. Otherwise fall back to an ABI call (wasm builtin or JS::ToInt32).
void CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate* ool) {
  FloatRegister input = ToFloatRegister(ool->input());
  Register output = ToRegister(ool->output());

  Label fail;

  if (Assembler::HasSSE3()) {
    Label failPopDouble;
    // Push double.
    masm.subl(Imm32(sizeof(double)), esp);
    masm.storeDouble(input, Operand(esp, 0));

    // Check exponent to avoid fp exceptions.
    masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopDouble);

    // Load double, perform 64-bit truncation.
    masm.truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), output);

    // Load low word, pop double and jump back.
    masm.load32(Address(esp, 0), output);
    masm.addl(Imm32(sizeof(double)), esp);
    masm.jump(ool->rejoin());

    // Out of int64 range: restore the stack and take the call fallback.
    masm.bind(&failPopDouble);
    masm.addl(Imm32(sizeof(double)), esp);
    masm.jump(&fail);
  } else {
    FloatRegister temp = ToFloatRegister(ool->tempFloat());

    // Try to convert doubles representing integers within 2^32 of a signed
    // integer, by adding/subtracting 2^32 and then trying to convert to int32.
    // This has to be an exact conversion, as otherwise the truncation works
    // incorrectly on the modified value.
    {
      // NaN (parity flag after ucomisd) goes straight to the fallback.
      ScratchDoubleScope fpscratch(masm);
      masm.zeroDouble(fpscratch);
      masm.vucomisd(fpscratch, input);
      masm.j(Assembler::Parity, &fail);
    }

    {
      // Pick the bias sign opposite to the input's sign.
      Label positive;
      masm.j(Assembler::Above, &positive);

      masm.loadConstantDouble(4294967296.0, temp);
      Label skip;
      masm.jmp(&skip);

      masm.bind(&positive);
      masm.loadConstantDouble(-4294967296.0, temp);
      masm.bind(&skip);
    }

    // Truncate the biased value, then round-trip it back to double to
    // verify the conversion was exact; otherwise use the call fallback.
    masm.addDouble(input, temp);
    masm.vcvttsd2si(temp, output);
    ScratchDoubleScope fpscratch(masm);
    masm.vcvtsi2sd(output, fpscratch, fpscratch);

    masm.vucomisd(fpscratch, temp);
    masm.j(Assembler::Parity, &fail);
    masm.j(Assembler::Equal, ool->rejoin());
  }

  masm.bind(&fail);
  {
    if (gen->compilingWasm()) {
      masm.Push(InstanceReg);
    }
    int32_t framePushedAfterInstance = masm.framePushed();

    saveVolatile(output);

    if (gen->compilingWasm()) {
      // Wasm: call the ToInt32 builtin with trap-site bookkeeping.
      masm.setupWasmABICall(wasm::SymbolicAddress::ToInt32);
      masm.passABIArg(input, ABIType::Float64);

      int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
      masm.callWithABI(ool->trapSiteDesc().bytecodeOffset,
                       wasm::SymbolicAddress::ToInt32,
                       mozilla::Some(instanceOffset));
    } else {
      // JS: plain ABI call to JS::ToInt32.
      using Fn = int32_t (*)(double);
      masm.setupUnalignedABICall(output);
      masm.passABIArg(input, ABIType::Float64);
      masm.callWithABI<Fn, JS::ToInt32>(ABIType::General,
                                        CheckUnsafeCallWithABI::DontCheckOther);
    }
    masm.storeCallInt32Result(output);

    restoreVolatile(output);

    if (gen->compilingWasm()) {
      masm.Pop(InstanceReg);
    }
  }

  masm.jump(ool->rejoin());
}
    702 
// Slow path for float32->int32 truncation; mirrors the double version
// above but widens to double for the ABI-call fallback (the input float
// register is converted in place and restored from the stack afterwards).
void CodeGeneratorX86::visitOutOfLineTruncateFloat32(
    OutOfLineTruncateFloat32* ool) {
  FloatRegister input = ToFloatRegister(ool->input());
  Register output = ToRegister(ool->output());

  Label fail;

  if (Assembler::HasSSE3()) {
    Label failPopFloat;

    // Push float32, but subtracts 64 bits so that the value popped by fisttp
    // fits
    masm.subl(Imm32(sizeof(uint64_t)), esp);
    masm.storeFloat32(input, Operand(esp, 0));

    // Check exponent to avoid fp exceptions.
    masm.branchFloat32NotInInt64Range(Address(esp, 0), output, &failPopFloat);

    // Load float, perform 64-bit truncation; the low 32 bits give the
    // modulo-2^32 int32 result.
    masm.truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), output);

    // Load low word, pop 64bits and jump back.
    masm.load32(Address(esp, 0), output);
    masm.addl(Imm32(sizeof(uint64_t)), esp);
    masm.jump(ool->rejoin());

    // Out of int64 range: restore the stack and take the call fallback.
    masm.bind(&failPopFloat);
    masm.addl(Imm32(sizeof(uint64_t)), esp);
    masm.jump(&fail);
  } else {
    FloatRegister temp = ToFloatRegister(ool->tempFloat());

    // Try to convert float32 representing integers within 2^32 of a signed
    // integer, by adding/subtracting 2^32 and then trying to convert to int32.
    // This has to be an exact conversion, as otherwise the truncation works
    // incorrectly on the modified value.
    {
      // NaN (parity flag after ucomiss) goes straight to the fallback.
      ScratchFloat32Scope fpscratch(masm);
      masm.zeroFloat32(fpscratch);
      masm.vucomiss(fpscratch, input);
      masm.j(Assembler::Parity, &fail);
    }

    {
      // Pick the bias sign opposite to the input's sign.
      Label positive;
      masm.j(Assembler::Above, &positive);

      masm.loadConstantFloat32(4294967296.f, temp);
      Label skip;
      masm.jmp(&skip);

      masm.bind(&positive);
      masm.loadConstantFloat32(-4294967296.f, temp);
      masm.bind(&skip);
    }

    // Truncate the biased value, then round-trip it back to float32 to
    // verify the conversion was exact; otherwise use the call fallback.
    masm.addFloat32(input, temp);
    masm.vcvttss2si(temp, output);
    ScratchFloat32Scope fpscratch(masm);
    masm.vcvtsi2ss(output, fpscratch, fpscratch);

    masm.vucomiss(fpscratch, temp);
    masm.j(Assembler::Parity, &fail);
    masm.j(Assembler::Equal, ool->rejoin());
  }

  masm.bind(&fail);
  {
    if (gen->compilingWasm()) {
      masm.Push(InstanceReg);
    }
    int32_t framePushedAfterInstance = masm.framePushed();

    saveVolatile(output);

    // Push always pushes a 64-bit double.
    masm.Push(input.asDouble());

    if (gen->compilingWasm()) {
      masm.setupWasmABICall(wasm::SymbolicAddress::ToInt32);
    } else {
      masm.setupUnalignedABICall(output);
    }

    // Widen the float32 in place; the original is restored from the stack
    // below via Pop.
    masm.vcvtss2sd(input, input, input);
    masm.passABIArg(input.asDouble(), ABIType::Float64);

    if (gen->compilingWasm()) {
      int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
      masm.callWithABI(ool->trapSiteDesc().bytecodeOffset,
                       wasm::SymbolicAddress::ToInt32,
                       mozilla::Some(instanceOffset));
    } else {
      using Fn = int32_t (*)(double);
      masm.callWithABI<Fn, JS::ToInt32>(ABIType::General,
                                        CheckUnsafeCallWithABI::DontCheckOther);
    }

    masm.storeCallInt32Result(output);
    masm.Pop(input.asDouble());

    restoreVolatile(output);

    if (gen->compilingWasm()) {
      masm.Pop(InstanceReg);
    }
  }

  masm.jump(ool->rejoin());
}
    813 
    814 void CodeGenerator::visitMulI64(LMulI64* lir) {
    815  Register64 lhs = ToRegister64(lir->lhs());
    816  LInt64Allocation rhs = lir->rhs();
    817 
    818  MOZ_ASSERT(ToOutRegister64(lir) == lhs);
    819 
    820  if (IsConstant(rhs)) {
    821    int64_t constant = ToInt64(rhs);
    822    switch (constant) {
    823      case -1:
    824        masm.neg64(lhs);
    825        return;
    826      case 0:
    827        masm.xor64(lhs, lhs);
    828        return;
    829      case 1:
    830        // nop
    831        return;
    832      case 2:
    833        masm.add64(lhs, lhs);
    834        return;
    835      default:
    836        if (constant > 0) {
    837          // Use shift if constant is power of 2.
    838          int32_t shift = mozilla::FloorLog2(constant);
    839          if (int64_t(1) << shift == constant) {
    840            masm.lshift64(Imm32(shift), lhs);
    841            return;
    842          }
    843        }
    844        Register temp = ToTempRegisterOrInvalid(lir->temp0());
    845        masm.mul64(Imm64(constant), lhs, temp);
    846    }
    847  } else {
    848    Register temp = ToTempRegisterOrInvalid(lir->temp0());
    849    masm.mul64(ToOperandOrRegister64(rhs), lhs, temp);
    850  }
    851 }
    852 
// Lowers wasm i64 signed division/modulo by calling the DivI64/ModI64
// builtin (chosen by whether the MIR node is WasmBuiltinModI64), after
// inline guards for divide-by-zero and the INT64_MIN / -1 overflow case.
void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
 MOZ_ASSERT(gen->compilingWasm());
 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);

 // Save the instance pointer; it is restored after the builtin call.
 masm.Push(InstanceReg);
 int32_t framePushedAfterInstance = masm.framePushed();

 Register64 lhs = ToRegister64(lir->lhs());
 Register64 rhs = ToRegister64(lir->rhs());
 Register64 output = ToOutRegister64(lir);

 MOZ_ASSERT(output == ReturnReg64);

 Label done;

 // Handle divide by zero.
 if (lir->canBeDivideByZero()) {
   Label nonZero;
   // We can use InstanceReg as temp register because we preserved it
   // before.
   masm.branchTest64(Assembler::NonZero, rhs, rhs, InstanceReg, &nonZero);
   masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->trapSiteDesc());
   masm.bind(&nonZero);
 }

 MDefinition* mir = lir->mir();

 // Handle an integer overflow exception from INT64_MIN / -1.
 if (lir->canBeNegativeOverflow()) {
   Label notOverflow;
   masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notOverflow);
   masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notOverflow);
   if (mir->isWasmBuiltinModI64()) {
     // For modulo, INT64_MIN % -1 is 0: materialize it and skip the call.
     masm.xor64(output, output);
     masm.jump(&done);
   } else {
     // For division, INT64_MIN / -1 overflows: trap.
     masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->trapSiteDesc());
   }
   masm.bind(&notOverflow);
 }

 wasm::SymbolicAddress callee = mir->isWasmBuiltinModI64()
                                    ? wasm::SymbolicAddress::ModI64
                                    : wasm::SymbolicAddress::DivI64;
 // Each i64 operand is passed as a high/low pair of 32-bit ABI arguments.
 masm.setupWasmABICall(callee);
 masm.passABIArg(lhs.high);
 masm.passABIArg(lhs.low);
 masm.passABIArg(rhs.high);
 masm.passABIArg(rhs.low);

 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
 masm.callWithABI(lir->trapSiteDesc().bytecodeOffset, callee,
                  mozilla::Some(instanceOffset));

 // output in edx:eax, move to output register.
 masm.movl(edx, output.high);
 MOZ_ASSERT(eax == output.low);

 masm.bind(&done);
 masm.Pop(InstanceReg);
}
    914 
// Lowers wasm i64 unsigned division/modulo by calling the UDivI64/UModI64
// builtin. Only a divide-by-zero guard is needed; unsigned division cannot
// overflow.
void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
 MOZ_ASSERT(gen->compilingWasm());
 MOZ_ASSERT(ToRegister(lir->instance()) == InstanceReg);

 // Save the instance pointer; it is restored after the builtin call.
 masm.Push(InstanceReg);
 int32_t framePushedAfterInstance = masm.framePushed();

 Register64 lhs = ToRegister64(lir->lhs());
 Register64 rhs = ToRegister64(lir->rhs());
 Register64 output = ToOutRegister64(lir);

 MOZ_ASSERT(output == ReturnReg64);

 // Prevent divide by zero.
 if (lir->canBeDivideByZero()) {
   Label nonZero;
   // We can use InstanceReg as temp register because we preserved it
   // before.
   masm.branchTest64(Assembler::NonZero, rhs, rhs, InstanceReg, &nonZero);
   masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->trapSiteDesc());
   masm.bind(&nonZero);
 }

 MDefinition* mir = lir->mir();
 wasm::SymbolicAddress callee = mir->isWasmBuiltinModI64()
                                    ? wasm::SymbolicAddress::UModI64
                                    : wasm::SymbolicAddress::UDivI64;
 // Each i64 operand is passed as a high/low pair of 32-bit ABI arguments.
 masm.setupWasmABICall(callee);
 masm.passABIArg(lhs.high);
 masm.passABIArg(lhs.low);
 masm.passABIArg(rhs.high);
 masm.passABIArg(rhs.low);

 int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
 masm.callWithABI(lir->trapSiteDesc().bytecodeOffset, callee,
                  mozilla::Some(instanceOffset));

 // output in edx:eax, move to output register.
 masm.movl(edx, output.high);
 MOZ_ASSERT(eax == output.low);

 masm.Pop(InstanceReg);
}
    958 
// Emits the signed pointer-width division used by BigInt division: the
// dividend is moved to eax, sign-extended into edx:eax with cdq, and
// divided by |divisor| with idiv. The quotient is left in eax (== output).
void CodeGeneratorX86::emitBigIntPtrDiv(LBigIntPtrDiv* ins, Register dividend,
                                       Register divisor, Register output) {
 // Callers handle division by zero and integer overflow.

 // idiv writes the quotient to eax and the remainder to edx, so edx must be
 // reserved as a temp and the output pinned to eax.
 MOZ_ASSERT(ToRegister(ins->temp0()) == edx);
 MOZ_ASSERT(output == eax);

 if (dividend != eax) {
   masm.movePtr(dividend, eax);
 }

 // Sign extend the lhs into edx to make edx:eax.
 masm.cdq();

 masm.idiv(divisor);
}
    975 
// Emits the signed pointer-width modulo used by BigInt modulo: the dividend
// (pinned to eax) is sign-extended into edx:eax with cdq and divided by
// |divisor| with idiv. The remainder is left in edx (== output).
void CodeGeneratorX86::emitBigIntPtrMod(LBigIntPtrMod* ins, Register dividend,
                                       Register divisor, Register output) {
 // Callers handle division by zero and integer overflow.

 // idiv writes the remainder to edx and the quotient to eax.
 MOZ_ASSERT(dividend == eax);
 MOZ_ASSERT(output == edx);

 // Sign extend the lhs into edx to make edx:eax.
 masm.cdq();

 masm.idiv(divisor);
}
    988 
    989 void CodeGenerator::visitShiftIntPtr(LShiftIntPtr* ins) {
    990  Register lhs = ToRegister(ins->lhs());
    991  const LAllocation* rhs = ins->rhs();
    992  Register out = ToRegister(ins->output());
    993 
    994  if (rhs->isConstant()) {
    995    MOZ_ASSERT(out == lhs);
    996 
    997    int32_t shift = ToIntPtr(rhs) & 0x1F;
    998    switch (ins->bitop()) {
    999      case JSOp::Lsh:
   1000        if (shift) {
   1001          masm.lshiftPtr(Imm32(shift), lhs);
   1002        }
   1003        break;
   1004      case JSOp::Rsh:
   1005        if (shift) {
   1006          masm.rshiftPtrArithmetic(Imm32(shift), lhs);
   1007        }
   1008        break;
   1009      case JSOp::Ursh:
   1010        if (shift) {
   1011          masm.rshiftPtr(Imm32(shift), lhs);
   1012        }
   1013        break;
   1014      default:
   1015        MOZ_CRASH("Unexpected shift op");
   1016    }
   1017  } else {
   1018    Register shift = ToRegister(rhs);
   1019    MOZ_ASSERT_IF(out != lhs, Assembler::HasBMI2());
   1020 
   1021    switch (ins->bitop()) {
   1022      case JSOp::Lsh:
   1023        if (out != lhs) {
   1024          masm.shlxl(lhs, shift, out);
   1025        } else {
   1026          masm.lshiftPtr(shift, lhs);
   1027        }
   1028        break;
   1029      case JSOp::Rsh:
   1030        if (out != lhs) {
   1031          masm.sarxl(lhs, shift, out);
   1032        } else {
   1033          masm.rshiftPtrArithmetic(shift, lhs);
   1034        }
   1035        break;
   1036      case JSOp::Ursh:
   1037        if (out != lhs) {
   1038          masm.shrxl(lhs, shift, out);
   1039        } else {
   1040          masm.rshiftPtr(shift, lhs);
   1041        }
   1042        break;
   1043      default:
   1044        MOZ_CRASH("Unexpected shift op");
   1045    }
   1046  }
   1047 }
   1048 
   1049 void CodeGenerator::visitShiftI64(LShiftI64* lir) {
   1050  Register64 lhs = ToRegister64(lir->lhs());
   1051  const LAllocation* rhs = lir->rhs();
   1052 
   1053  MOZ_ASSERT(ToOutRegister64(lir) == lhs);
   1054 
   1055  if (rhs->isConstant()) {
   1056    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
   1057    switch (lir->bitop()) {
   1058      case JSOp::Lsh:
   1059        if (shift) {
   1060          masm.lshift64(Imm32(shift), lhs);
   1061        }
   1062        break;
   1063      case JSOp::Rsh:
   1064        if (shift) {
   1065          masm.rshift64Arithmetic(Imm32(shift), lhs);
   1066        }
   1067        break;
   1068      case JSOp::Ursh:
   1069        if (shift) {
   1070          masm.rshift64(Imm32(shift), lhs);
   1071        }
   1072        break;
   1073      default:
   1074        MOZ_CRASH("Unexpected shift op");
   1075    }
   1076    return;
   1077  }
   1078 
   1079  Register shift = ToRegister(rhs);
   1080  MOZ_ASSERT(shift == ecx);
   1081  switch (lir->bitop()) {
   1082    case JSOp::Lsh:
   1083      masm.lshift64(shift, lhs);
   1084      break;
   1085    case JSOp::Rsh:
   1086      masm.rshift64Arithmetic(shift, lhs);
   1087      break;
   1088    case JSOp::Ursh:
   1089      masm.rshift64(shift, lhs);
   1090      break;
   1091    default:
   1092      MOZ_CRASH("Unexpected shift op");
   1093  }
   1094 }
   1095 
   1096 void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
   1097  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
   1098 
   1099  Register cond = ToRegister(lir->condExpr());
   1100  Register64 falseExpr = ToRegister64(lir->falseExpr());
   1101  Register64 out = ToOutRegister64(lir);
   1102 
   1103  MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
   1104             "true expr is reused for input");
   1105 
   1106  Label done;
   1107  masm.branchTest32(Assembler::NonZero, cond, cond, &done);
   1108  masm.movl(falseExpr.low, out.low);
   1109  masm.movl(falseExpr.high, out.high);
   1110  masm.bind(&done);
   1111 }
   1112 
   1113 // We expect to handle only the case where compare is {U,}Int32 and select is
   1114 // {U,}Int32.  Some values may be stack allocated, and the "true" input is
   1115 // reused for the output.
   1116 void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
   1117  bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
   1118                    ins->compareType() == MCompare::Compare_UInt32;
   1119  bool selIs32bit = ins->mir()->type() == MIRType::Int32;
   1120 
   1121  MOZ_RELEASE_ASSERT(
   1122      cmpIs32bit && selIs32bit,
   1123      "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
   1124 
   1125  Register trueExprAndDest = ToRegister(ins->output());
   1126  MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
   1127             "true expr input is reused for output");
   1128 
   1129  Assembler::Condition cond = Assembler::InvertCondition(
   1130      JSOpToCondition(ins->compareType(), ins->jsop()));
   1131  const LAllocation* rhs = ins->rightExpr();
   1132  const LAllocation* falseExpr = ins->ifFalseExpr();
   1133  Register lhs = ToRegister(ins->leftExpr());
   1134 
   1135  if (rhs->isGeneralReg()) {
   1136    if (falseExpr->isGeneralReg()) {
   1137      masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
   1138                       trueExprAndDest);
   1139    } else {
   1140      masm.cmp32Load32(cond, lhs, ToRegister(rhs), ToAddress(falseExpr),
   1141                       trueExprAndDest);
   1142    }
   1143  } else {
   1144    if (falseExpr->isGeneralReg()) {
   1145      masm.cmp32Move32(cond, lhs, ToAddress(rhs), ToRegister(falseExpr),
   1146                       trueExprAndDest);
   1147    } else {
   1148      masm.cmp32Load32(cond, lhs, ToAddress(rhs), ToAddress(falseExpr),
   1149                       trueExprAndDest);
   1150    }
   1151  }
   1152 }
   1153 
   1154 void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
   1155  Register64 output = ToOutRegister64(lir);
   1156  Register input = ToRegister(lir->input());
   1157 
   1158  if (lir->mir()->isUnsigned()) {
   1159    if (output.low != input) {
   1160      masm.movl(input, output.low);
   1161    }
   1162    masm.xorl(output.high, output.high);
   1163  } else {
   1164    MOZ_ASSERT(output.low == input);
   1165    MOZ_ASSERT(output.low == eax);
   1166    MOZ_ASSERT(output.high == edx);
   1167    masm.cdq();
   1168  }
   1169 }
   1170 
   1171 void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
   1172 #ifdef DEBUG
   1173  Register64 input = ToRegister64(lir->input());
   1174  Register64 output = ToOutRegister64(lir);
   1175  MOZ_ASSERT(input.low == eax);
   1176  MOZ_ASSERT(output.low == eax);
   1177  MOZ_ASSERT(input.high == edx);
   1178  MOZ_ASSERT(output.high == edx);
   1179 #endif
   1180  switch (lir->mir()->mode()) {
   1181    case MSignExtendInt64::Byte:
   1182      masm.move8SignExtend(eax, eax);
   1183      break;
   1184    case MSignExtendInt64::Half:
   1185      masm.move16SignExtend(eax, eax);
   1186      break;
   1187    case MSignExtendInt64::Word:
   1188      break;
   1189  }
   1190  masm.cdq();
   1191 }
   1192 
   1193 void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
   1194  LInt64Allocation input = lir->input();
   1195  Register output = ToRegister(lir->output());
   1196 
   1197  if (lir->mir()->bottomHalf()) {
   1198    masm.move32(ToRegister(input.low()), output);
   1199  } else {
   1200    masm.move32(ToRegister(input.high()), output);
   1201  }
   1202 }
   1203 
// Extending a 32-bit index to 64 bits only exists on 64-bit targets; this
// node must never reach the x86-32 code generator.
void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index*) {
 MOZ_CRASH("64-bit only");
}
   1207 
// Wrap a 64-bit index down to 32 bits; a no-op on this platform.
void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
 // Generates no code on this platform because we just return the low part of
 // the input register pair.
 // Just assert the allocator gave input and output the same register.
 MOZ_ASSERT(ToRegister(lir->input()) == ToRegister(lir->output()));
}
   1213 
   1214 void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
   1215  FloatRegister input = ToFloatRegister(lir->input());
   1216  Register64 output = ToOutRegister64(lir);
   1217 
   1218  MWasmTruncateToInt64* mir = lir->mir();
   1219  FloatRegister floatTemp = ToFloatRegister(lir->temp0());
   1220 
   1221  Label fail, convert;
   1222 
   1223  MOZ_ASSERT(mir->input()->type() == MIRType::Double ||
   1224             mir->input()->type() == MIRType::Float32);
   1225 
   1226  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
   1227  addOutOfLineCode(ool, mir);
   1228 
   1229  bool isSaturating = mir->isSaturating();
   1230  if (mir->input()->type() == MIRType::Float32) {
   1231    if (mir->isUnsigned()) {
   1232      masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating,
   1233                                       ool->entry(), ool->rejoin(), floatTemp);
   1234    } else {
   1235      masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, ool->entry(),
   1236                                      ool->rejoin(), floatTemp);
   1237    }
   1238  } else {
   1239    if (mir->isUnsigned()) {
   1240      masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, ool->entry(),
   1241                                      ool->rejoin(), floatTemp);
   1242    } else {
   1243      masm.wasmTruncateDoubleToInt64(input, output, isSaturating, ool->entry(),
   1244                                     ool->rejoin(), floatTemp);
   1245    }
   1246  }
   1247 }
   1248 
   1249 void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
   1250  Register64 input = ToRegister64(lir->input());
   1251  FloatRegister output = ToFloatRegister(lir->output());
   1252  Register temp = ToTempRegisterOrInvalid(lir->temp0());
   1253 
   1254  MIRType outputType = lir->mir()->type();
   1255  MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
   1256 
   1257  if (outputType == MIRType::Double) {
   1258    if (lir->mir()->isUnsigned()) {
   1259      masm.convertUInt64ToDouble(input, output, temp);
   1260    } else {
   1261      masm.convertInt64ToDouble(input, output);
   1262    }
   1263  } else {
   1264    if (lir->mir()->isUnsigned()) {
   1265      masm.convertUInt64ToFloat32(input, output, temp);
   1266    } else {
   1267      masm.convertInt64ToFloat32(input, output);
   1268    }
   1269  }
   1270 }
   1271 
   1272 void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
   1273  LInt64Allocation input = ins->input();
   1274  Register64 inputR = ToRegister64(input);
   1275  MOZ_ASSERT(inputR == ToOutRegister64(ins));
   1276  masm.notl(inputR.high);
   1277  masm.notl(inputR.low);
   1278 }
   1279 
   1280 void CodeGenerator::visitAddIntPtr(LAddIntPtr* ins) {
   1281  Register lhs = ToRegister(ins->lhs());
   1282  MOZ_ASSERT(ToRegister(ins->output()) == lhs);
   1283 
   1284  if (ins->rhs()->isConstant()) {
   1285    masm.addPtr(ImmWord(ToIntPtr(ins->rhs())), lhs);
   1286  } else {
   1287    masm.addl(ToOperand(ins->rhs()), lhs);
   1288  }
   1289 }
   1290 
   1291 void CodeGenerator::visitSubIntPtr(LSubIntPtr* ins) {
   1292  Register lhs = ToRegister(ins->lhs());
   1293  MOZ_ASSERT(ToRegister(ins->output()) == lhs);
   1294 
   1295  if (ins->rhs()->isConstant()) {
   1296    masm.subPtr(ImmWord(ToIntPtr(ins->rhs())), lhs);
   1297  } else {
   1298    masm.subl(ToOperand(ins->rhs()), lhs);
   1299  }
   1300 }
   1301 
   1302 void CodeGenerator::visitMulIntPtr(LMulIntPtr* ins) {
   1303  Register lhs = ToRegister(ins->lhs());
   1304  MOZ_ASSERT(ToRegister(ins->output()) == lhs);
   1305  const LAllocation* rhs = ins->rhs();
   1306 
   1307  if (rhs->isConstant()) {
   1308    intptr_t constant = ToIntPtr(rhs);
   1309 
   1310    switch (constant) {
   1311      case -1:
   1312        masm.negPtr(lhs);
   1313        return;
   1314      case 0:
   1315        masm.xorPtr(lhs, lhs);
   1316        return;
   1317      case 1:
   1318        return;
   1319      case 2:
   1320        masm.addPtr(lhs, lhs);
   1321        return;
   1322    }
   1323 
   1324    // Use shift if constant is a power of 2.
   1325    if (constant > 0 && mozilla::IsPowerOfTwo(uintptr_t(constant))) {
   1326      uint32_t shift = mozilla::FloorLog2(constant);
   1327      masm.lshiftPtr(Imm32(shift), lhs);
   1328      return;
   1329    }
   1330 
   1331    masm.mulPtr(ImmWord(constant), lhs);
   1332  } else {
   1333    masm.imull(ToOperand(rhs), lhs);
   1334  }
   1335 }