tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

CodeGenerator-mips-shared.cpp (66485B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/mips-shared/CodeGenerator-mips-shared.h"
      8 
      9 #include "mozilla/MathAlgorithms.h"
     10 
     11 #include "jsnum.h"
     12 
     13 #include "jit/CodeGenerator.h"
     14 #include "jit/InlineScriptTree.h"
     15 #include "jit/JitRuntime.h"
     16 #include "jit/MIR-wasm.h"
     17 #include "jit/MIR.h"
     18 #include "jit/MIRGraph.h"
     19 #include "js/Conversions.h"
     20 #include "vm/JSContext.h"
     21 #include "vm/Realm.h"
     22 #include "vm/Shape.h"
     23 
     24 #include "jit/MacroAssembler-inl.h"
     25 #include "jit/shared/CodeGenerator-shared-inl.h"
     26 #include "vm/JSScript-inl.h"
     27 
     28 using namespace js;
     29 using namespace js::jit;
     30 
     31 using JS::GenericNaN;
     32 using mozilla::NegativeInfinity;
     33 
     34 // shared
// Construct the code generator shared by MIPS32 and MIPS64. All state lives
// in CodeGeneratorShared; this class only contributes MIPS-specific lowering.
CodeGeneratorMIPSShared::CodeGeneratorMIPSShared(
    MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm,
    const wasm::CodeMetadata* wasmCodeMeta)
    : CodeGeneratorShared(gen, graph, masm, wasmCodeMeta) {}
     39 
     40 Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation& a) {
     41  if (a.isGeneralReg()) {
     42    return Operand(a.toGeneralReg()->reg());
     43  }
     44  if (a.isFloatReg()) {
     45    return Operand(a.toFloatReg()->reg());
     46  }
     47  return Operand(ToAddress(a));
     48 }
     49 
// Pointer overload: forwards to the reference overload above.
Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation* a) {
  return ToOperand(*a);
}
     53 
// Definition overload: produce an Operand for the definition's output
// allocation.
Operand CodeGeneratorMIPSShared::ToOperand(const LDefinition* def) {
  return ToOperand(def->output());
}
     57 
     58 void CodeGeneratorMIPSShared::branchToBlock(Assembler::FloatFormat fmt,
     59                                            FloatRegister lhs,
     60                                            FloatRegister rhs, MBasicBlock* mir,
     61                                            Assembler::DoubleCondition cond) {
     62  // Skip past trivial blocks.
     63  Label* label = skipTrivialBlocks(mir)->lir()->label();
     64  if (fmt == Assembler::DoubleFloat) {
     65    masm.branchDouble(cond, lhs, rhs, label);
     66  } else {
     67    masm.branchFloat(cond, lhs, rhs, label);
     68  }
     69 }
     70 
// Emit all queued out-of-line code, then append the shared bailout tail if
// any instruction requested a bailout through deoptLabel_. Returns false on
// assembler OOM.
bool CodeGeneratorMIPSShared::generateOutOfLineCode() {
  if (!CodeGeneratorShared::generateOutOfLineCode()) {
    return false;
  }

  if (deoptLabel_.used()) {
    // All non-table-based bailouts will go here.
    masm.bind(&deoptLabel_);

    // Push the frame size, so the handler can recover the IonScript.
    // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk
    // We have to use 'ra' because generateBailoutTable will implicitly do
    // the same.
    masm.move32(Imm32(frameSize()), ra);

    // Tail-jump into the runtime's generic bailout trampoline.
    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
    masm.jump(handler);
  }

  // Report failure if the assembler ran out of memory at any point.
  return !masm.oom();
}
     92 
// Redirect |label| to an out-of-line stub that records |snapshot|'s offset
// on the stack and then jumps to the shared bailout tail (deoptLabel_).
void CodeGeneratorMIPSShared::bailoutFrom(Label* label, LSnapshot* snapshot) {
  // The label must have pending uses and must not already be bound; we are
  // about to retarget those uses at the stub.
  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  // Serialize the snapshot now so its offset is available to the stub.
  encode(snapshot);

  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    // Push snapshotOffset and make sure stack is aligned.
    masm.subPtr(Imm32(sizeof(Value)), StackPointer);
    masm.storePtr(ImmWord(snapshot->snapshotOffset()),
                  Address(StackPointer, 0));

    masm.jump(&deoptLabel_);
  });
  // Attribute the stub to the bailing bytecode site for profiling/debugging.
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.retarget(label, ool->entry());
}
    113 
// Unconditionally bail out with |snapshot|: emit an always-taken jump and
// let bailoutFrom() retarget it at the bailout stub.
void CodeGeneratorMIPSShared::bailout(LSnapshot* snapshot) {
  Label label;
  masm.jump(&label);
  bailoutFrom(&label, snapshot);
}
    119 
    120 void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
    121  FloatRegister first = ToFloatRegister(ins->first());
    122  FloatRegister second = ToFloatRegister(ins->second());
    123 
    124  MOZ_ASSERT(first == ToFloatRegister(ins->output()));
    125 
    126  if (ins->mir()->isMax()) {
    127    masm.maxDouble(second, first, true);
    128  } else {
    129    masm.minDouble(second, first, true);
    130  }
    131 }
    132 
    133 void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
    134  FloatRegister first = ToFloatRegister(ins->first());
    135  FloatRegister second = ToFloatRegister(ins->second());
    136 
    137  MOZ_ASSERT(first == ToFloatRegister(ins->output()));
    138 
    139  if (ins->mir()->isMax()) {
    140    masm.maxFloat32(second, first, true);
    141  } else {
    142    masm.minFloat32(second, first, true);
    143  }
    144 }
    145 
// Lower 32-bit integer addition. Without a snapshot the add is known to be
// infallible; with one, use the overflow-testing add and bail out on signed
// overflow.
void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  const LDefinition* dest = ins->output();

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we don't need to check for overflow
  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  // Fallible path: branch to |overflow| when the 32-bit add overflows.
  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              ToRegister(rhs), &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}
    174 
    175 void CodeGenerator::visitAddIntPtr(LAddIntPtr* ins) {
    176  Register lhs = ToRegister(ins->lhs());
    177  const LAllocation* rhs = ins->rhs();
    178  Register dest = ToRegister(ins->output());
    179 
    180  if (rhs->isConstant()) {
    181    masm.ma_daddu(dest, lhs, ImmWord(ToIntPtr(rhs)));
    182  } else {
    183    masm.as_daddu(dest, lhs, ToRegister(rhs));
    184  }
    185 }
    186 
    187 void CodeGenerator::visitAddI64(LAddI64* lir) {
    188  Register lhs = ToRegister64(lir->lhs()).reg;
    189  LInt64Allocation rhs = lir->rhs();
    190  Register dest = ToOutRegister64(lir).reg;
    191 
    192  if (IsConstant(rhs)) {
    193    masm.ma_daddu(dest, lhs, ImmWord(ToInt64(rhs)));
    194  } else {
    195    masm.as_daddu(dest, lhs, ToRegister64(rhs).reg);
    196  }
    197 }
    198 
// Lower 32-bit integer subtraction. Mirrors visitAddI: infallible without a
// snapshot, otherwise bail out on signed overflow.
void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  const LDefinition* dest = ins->output();

  MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

  // If there is no snapshot, we don't need to check for overflow
  if (!ins->snapshot()) {
    if (rhs->isConstant()) {
      masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
    } else {
      masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
    }
    return;
  }

  // Fallible path: branch to |overflow| when the 32-bit subtract overflows.
  Label overflow;
  if (rhs->isConstant()) {
    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              Imm32(ToInt32(rhs)), &overflow);
  } else {
    masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
                              ToRegister(rhs), &overflow);
  }

  bailoutFrom(&overflow, ins->snapshot());
}
    227 
    228 void CodeGenerator::visitSubIntPtr(LSubIntPtr* ins) {
    229  Register lhs = ToRegister(ins->lhs());
    230  const LAllocation* rhs = ins->rhs();
    231  Register dest = ToRegister(ins->output());
    232 
    233  if (rhs->isConstant()) {
    234    masm.ma_dsubu(dest, lhs, ImmWord(ToIntPtr(rhs)));
    235  } else {
    236    masm.as_dsubu(dest, lhs, ToRegister(rhs));
    237  }
    238 }
    239 
    240 void CodeGenerator::visitSubI64(LSubI64* lir) {
    241  Register lhs = ToRegister64(lir->lhs()).reg;
    242  LInt64Allocation rhs = lir->rhs();
    243  Register dest = ToOutRegister64(lir).reg;
    244 
    245  if (IsConstant(rhs)) {
    246    masm.ma_dsubu(dest, lhs, ImmWord(ToInt64(rhs)));
    247  } else {
    248    masm.as_dsubu(dest, lhs, ToRegister64(rhs).reg);
    249  }
    250 }
    251 
// Lower 32-bit integer multiplication. Constant right-hand sides are
// strength-reduced to negate/move/add/shift sequences where possible; the
// fallible cases (signed overflow, negative zero) bail out via the snapshot.
void CodeGenerator::visitMulI(LMulI* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());
  MMul* mul = ins->mir();

  // Integer-mode multiplies must have been proven infallible upstream.
  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  if (rhs->isConstant()) {
    int32_t constant = ToInt32(rhs);

    // Bailout on -0.0
    // constant == 0 gives -0 when lhs < 0; constant < 0 gives -0 when
    // lhs == 0, hence the two different bailout conditions.
    if (mul->canBeNegativeZero() && constant <= 0) {
      Assembler::Condition cond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      bailoutCmp32(cond, lhs, Imm32(0), ins->snapshot());
    }

    // Small constants get dedicated sequences.
    switch (constant) {
      case -1:
        // -INT32_MIN is not representable; bail out first if possible.
        if (mul->canOverflow()) {
          bailoutCmp32(Assembler::Equal, lhs, Imm32(INT32_MIN),
                       ins->snapshot());
        }

        masm.ma_negu(dest, lhs);
        return;
      case 0:
        masm.move32(Imm32(0), dest);
        return;
      case 1:
        masm.move32(lhs, dest);
        return;
      case 2:
        // x * 2 == x + x, with an overflow-checking add if fallible.
        if (mul->canOverflow()) {
          Label mulTwoOverflow;
          masm.ma_add32TestOverflow(dest, lhs, lhs, &mulTwoOverflow);

          bailoutFrom(&mulTwoOverflow, ins->snapshot());
        } else {
          masm.as_addu(dest, lhs, lhs);
        }
        return;
    }

    if (constant > 0) {
      uint32_t shift = mozilla::FloorLog2(constant);

      if (!mul->canOverflow()) {
        // If it cannot overflow, we can do lots of optimizations.

        // See if the constant has one bit set, meaning it can be
        // encoded as a bitshift.
        if ((1 << shift) == constant) {
          masm.ma_sll(dest, lhs, Imm32(shift));
          return;
        }

        // If the constant cannot be encoded as (1<<C1), see if it can
        // be encoded as (1<<C1) | (1<<C2), which can be computed
        // using an add and a shift.
        uint32_t rest = constant - (1 << shift);
        uint32_t shift_rest = mozilla::FloorLog2(rest);
        if ((1u << shift_rest) == rest) {
          UseScratchRegisterScope temps(masm);
          Register scratch = temps.Acquire();

          // dest = ((lhs << (C1-C2)) + lhs) << C2.
          masm.ma_sll(scratch, lhs, Imm32(shift - shift_rest));
          masm.as_addu(dest, scratch, lhs);
          if (shift_rest != 0) {
            masm.ma_sll(dest, dest, Imm32(shift_rest));
          }
          return;
        }
      } else {
        // To stay on the safe side, only optimize things that are a power of 2.
        if ((1 << shift) == constant) {
          UseScratchRegisterScope temps(masm);
          Register scratch = temps.Acquire();

          // dest = lhs * pow(2, shift)
          masm.ma_dsll(dest, lhs, Imm32(shift));

          // At runtime, check (dest >> shift == intptr_t(dest) >> shift), if
          // this does not hold, some bits were lost due to overflow, and the
          // computation should be resumed as a double.
          masm.ma_sll(scratch, dest, Imm32(0));
          bailoutCmp32(Assembler::NotEqual, dest, scratch, ins->snapshot());
          return;
        }
      }
    }

    // General constant multiply, with or without an overflow check.
    if (mul->canOverflow()) {
      Label mulConstOverflow;
      masm.ma_mul32TestOverflow(dest, lhs, Imm32(constant), &mulConstOverflow);

      bailoutFrom(&mulConstOverflow, ins->snapshot());
    } else {
      masm.ma_mul(dest, lhs, Imm32(constant));
    }
  } else {
    // Register * register.
    if (mul->canOverflow()) {
      Label multRegOverflow;
      masm.ma_mul32TestOverflow(dest, lhs, ToRegister(rhs), &multRegOverflow);

      bailoutFrom(&multRegOverflow, ins->snapshot());
    } else {
      masm.as_mul(dest, lhs, ToRegister(rhs));
    }

    if (mul->canBeNegativeZero()) {
      Label done;
      masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);

      // Result is -0 if lhs or rhs is negative.
      // In that case result must be double value so bailout
      UseScratchRegisterScope temps(masm);
      Register scratch = temps.Acquire();
      masm.as_or(scratch, lhs, ToRegister(rhs));
      bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());

      masm.bind(&done);
    }
  }
}
    379 
    380 void CodeGeneratorMIPSShared::emitMulI64(Register lhs, int64_t rhs,
    381                                         Register dest) {
    382  switch (rhs) {
    383    case -1:
    384      masm.as_dsubu(dest, zero, lhs);
    385      return;
    386    case 0:
    387      masm.movePtr(zero, dest);
    388      return;
    389    case 1:
    390      masm.movePtr(lhs, dest);
    391      return;
    392    case 2:
    393      masm.as_daddu(dest, lhs, lhs);
    394      return;
    395  }
    396 
    397  if (rhs > 0) {
    398    if (mozilla::IsPowerOfTwo(static_cast<uint64_t>(rhs + 1))) {
    399      int32_t shift = mozilla::FloorLog2(rhs + 1);
    400 
    401      UseScratchRegisterScope temps(masm);
    402      Register savedLhs = lhs;
    403      if (dest == lhs) {
    404        savedLhs = temps.Acquire();
    405        masm.movePtr(lhs, savedLhs);
    406      }
    407      masm.lshiftPtr(Imm32(shift), lhs, dest);
    408      masm.subPtr(savedLhs, dest);
    409      return;
    410    }
    411 
    412    if (mozilla::IsPowerOfTwo(static_cast<uint64_t>(rhs - 1))) {
    413      int32_t shift = mozilla::FloorLog2(rhs - 1u);
    414 
    415      UseScratchRegisterScope temps(masm);
    416      Register savedLhs = lhs;
    417      if (dest == lhs) {
    418        savedLhs = temps.Acquire();
    419        masm.movePtr(lhs, savedLhs);
    420      }
    421      masm.lshiftPtr(Imm32(shift), lhs, dest);
    422      masm.addPtr(savedLhs, dest);
    423      return;
    424    }
    425 
    426    // Use shift if constant is power of 2.
    427    int32_t shift = mozilla::FloorLog2(rhs);
    428    if (int64_t(1) << shift == rhs) {
    429      masm.lshiftPtr(Imm32(shift), lhs, dest);
    430      return;
    431    }
    432  }
    433 
    434  masm.ma_dmulu(dest, lhs, ImmWord(rhs));
    435 }
    436 
    437 void CodeGenerator::visitMulIntPtr(LMulIntPtr* ins) {
    438  Register lhs = ToRegister(ins->lhs());
    439  const LAllocation* rhs = ins->rhs();
    440  Register dest = ToRegister(ins->output());
    441 
    442  if (rhs->isConstant()) {
    443    emitMulI64(lhs, ToIntPtr(rhs), dest);
    444  } else {
    445    masm.ma_dmulu(dest, lhs, ToRegister(rhs));
    446  }
    447 }
    448 
    449 void CodeGenerator::visitMulI64(LMulI64* lir) {
    450  Register lhs = ToRegister64(lir->lhs()).reg;
    451  LInt64Allocation rhs = lir->rhs();
    452  Register dest = ToOutRegister64(lir).reg;
    453 
    454  if (IsConstant(rhs)) {
    455    emitMulI64(lhs, ToInt64(rhs), dest);
    456  } else {
    457    masm.ma_dmulu(dest, lhs, ToRegister64(rhs).reg);
    458  }
    459 }
    460 
// Lower 32-bit signed division. Depending on the MIR node's flags, divide-
// by-zero and INT32_MIN / -1 either trap (wasm), truncate to a defined
// result, or bail out; a non-zero remainder also bails out unless the
// result is truncated.
void CodeGenerator::visitDivI(LDivI* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  Register temp = ToRegister(ins->temp0());
  MDiv* mir = ins->mir();

  Label done;

  // Handle divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      // wasm semantics: divide-by-zero traps.
      Label nonZero;
      masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
      masm.bind(&nonZero);
    } else if (mir->canTruncateInfinities()) {
      // Truncated division by zero is zero (Infinity|0 == 0)
      Label notzero;
      masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
      masm.move32(Imm32(0), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&notzero);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Handle an integer overflow exception from -2147483648 / -1.
  if (mir->canBeNegativeOverflow()) {
    Label notMinInt;
    masm.move32(Imm32(INT32_MIN), temp);
    masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);

    // lhs == INT32_MIN: now check whether rhs == -1.
    masm.move32(Imm32(-1), temp);
    if (mir->trapOnError()) {
      Label ok;
      masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->trapSiteDesc());
      masm.bind(&ok);
    } else if (mir->canTruncateOverflow()) {
      // (-INT32_MIN)|0 == INT32_MIN
      Label skip;
      masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
      masm.move32(Imm32(INT32_MIN), dest);
      masm.ma_b(&done, ShortJump);
      masm.bind(&skip);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
    }
    masm.bind(&notMinInt);
  }

  // Handle negative 0. (0/-Y)
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    Label nonzero;
    masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
    bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
    masm.bind(&nonzero);
  }

  // All regular. Lets call div.
  if (mir->canTruncateRemainder()) {
#ifdef MIPSR6
    masm.as_div(dest, lhs, rhs);
#else
    masm.as_div(lhs, rhs);
    masm.as_mflo(dest);
#endif
  } else {
    MOZ_ASSERT(mir->fallible());

    // Compute the remainder first and bail out if it is non-zero: the
    // result would not be an int32.
#ifdef MIPSR6
    masm.as_mod(temp, lhs, rhs);
#else
    masm.as_div(lhs, rhs);
    masm.as_mfhi(temp);
#endif

    bailoutCmp32(Assembler::NonZero, temp, temp, ins->snapshot());

#ifdef MIPSR6
    masm.as_div(dest, lhs, rhs);
#else
    masm.as_mflo(dest);
#endif
  }

  masm.bind(&done);
}
    553 
// Lower signed division by a known power of two (2^shift) using arithmetic
// shifts, with the standard rounding correction for negative numerators.
void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  Register lhs = ToRegister(ins->numerator());
  Register dest = ToRegister(ins->output());
  Register tmp = ToRegister(ins->temp0());
  int32_t shift = ins->shift();

  if (shift != 0) {
    MDiv* mir = ins->mir();
    if (!mir->isTruncated()) {
      // If the remainder is going to be != 0, bailout since this must
      // be a double.
      masm.ma_sll(tmp, lhs, Imm32(32 - shift));
      bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
    }

    if (!mir->canBeNegativeDividend()) {
      // Numerator is unsigned, so needs no adjusting. Do the shift.
      masm.ma_sra(dest, lhs, Imm32(shift));
      return;
    }

    // Adjust the value so that shifting produces a correctly rounded result
    // when the numerator is negative. See 10-1 "Signed Division by a Known
    // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
    if (shift > 1) {
      masm.ma_sra(tmp, lhs, Imm32(31));
      masm.ma_srl(tmp, tmp, Imm32(32 - shift));
      masm.add32(lhs, tmp);
    } else {
      masm.ma_srl(tmp, lhs, Imm32(32 - shift));
      masm.add32(lhs, tmp);
    }

    // Do the shift.
    masm.ma_sra(dest, tmp, Imm32(shift));
  } else {
    // shift == 0 means dividing by 1: the result is the numerator itself.
    masm.move32(lhs, dest);
  }
}
    593 
// Lower 32-bit signed modulus. Divide-by-zero traps (wasm), truncates to 0,
// or bails out depending on the MIR flags; a zero result with a negative
// dividend bails out because the true result is -0 (a double).
void CodeGenerator::visitModI(LModI* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register dest = ToRegister(ins->output());
  MMod* mir = ins->mir();
  Label done;

  // Prevent X % 0.
  // For X % Y, Compare Y with 0.
  // There are two cases: (Y == 0) and (Y != 0)
  // If (Y == 0), then we want to bail.
  // if (Y != 0), we don't bail.

  if (mir->canBeDivideByZero()) {
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
        masm.bind(&nonZero);
      } else {
        Label skip;
        masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
        // (X % 0) | 0 == 0
        masm.move32(Imm32(0), dest);
        masm.ma_b(&done, ShortJump);
        masm.bind(&skip);
      }
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  // The remainder is delivered in HI on pre-R6, directly on R6.
#ifdef MIPSR6
  masm.as_mod(dest, lhs, rhs);
#else
  masm.as_div(lhs, rhs);
  masm.as_mfhi(dest);
#endif

  // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
  // This also catches INT_MIN % -1 since we want -0.
  if (mir->canBeNegativeDividend()) {
    if (mir->isTruncated()) {
      // -0.0|0 == 0
    } else {
      MOZ_ASSERT(mir->fallible());
      // See if X < 0
      masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
      bailoutCmp32(Assembler::Signed, lhs, Imm32(0), ins->snapshot());
    }
  }
  masm.bind(&done);
}
    649 
// Lower modulus by a power of two (2^shift) with a bitmask, handling the
// sign of the dividend separately so the result keeps the dividend's sign.
void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
  Register in = ToRegister(ins->input());
  Register out = ToRegister(ins->output());
  MMod* mir = ins->mir();
  Label negative, done;

  masm.move32(in, out);
  // 0 % 2^shift == 0: nothing more to do.
  masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
  // Switch based on sign of the lhs.
  // Positive numbers are just a bitmask
  masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
  {
    masm.and32(Imm32((1 << ins->shift()) - 1), out);
    masm.ma_b(&done, ShortJump);
  }

  // Negative numbers need a negate, bitmask, negate
  {
    masm.bind(&negative);
    masm.neg32(out);
    masm.and32(Imm32((1 << ins->shift()) - 1), out);
    masm.neg32(out);
  }
  // A zero result from a negative dividend is really -0; bail out unless
  // the consumer truncates.
  if (mir->canBeNegativeDividend()) {
    if (!mir->isTruncated()) {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
    } else {
      // -0|0 == 0
    }
  }
  masm.bind(&done);
}
    683 
    684 void CodeGenerator::visitModMaskI(LModMaskI* ins) {
    685  Register src = ToRegister(ins->input());
    686  Register dest = ToRegister(ins->output());
    687  Register tmp0 = ToRegister(ins->temp0());
    688  Register tmp1 = ToRegister(ins->temp1());
    689  MMod* mir = ins->mir();
    690 
    691  if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
    692    MOZ_ASSERT(mir->fallible());
    693 
    694    Label bail;
    695    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
    696    bailoutFrom(&bail, ins->snapshot());
    697  } else {
    698    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
    699  }
    700 }
    701 
    702 void CodeGenerator::visitBitNotI(LBitNotI* ins) {
    703  Register input = ToRegister(ins->input());
    704  Register dest = ToRegister(ins->output());
    705  masm.ma_not(dest, input);
    706 }
    707 
    708 void CodeGenerator::visitBitOpI(LBitOpI* ins) {
    709  Register lhs = ToRegister(ins->lhs());
    710  const LAllocation* rhs = ins->rhs();
    711  Register dest = ToRegister(ins->output());
    712 
    713  // all of these bitops should be either imm32's, or integer registers.
    714  switch (ins->bitop()) {
    715    case JSOp::BitOr:
    716      if (rhs->isConstant()) {
    717        masm.ma_or(dest, lhs, Imm32(ToInt32(rhs)));
    718      } else {
    719        masm.as_or(dest, lhs, ToRegister(rhs));
    720      }
    721      break;
    722    case JSOp::BitXor:
    723      if (rhs->isConstant()) {
    724        masm.ma_xor(dest, lhs, Imm32(ToInt32(rhs)));
    725      } else {
    726        masm.as_xor(dest, lhs, ToRegister(rhs));
    727      }
    728      break;
    729    case JSOp::BitAnd:
    730      if (rhs->isConstant()) {
    731        masm.ma_and(dest, lhs, Imm32(ToInt32(rhs)));
    732      } else {
    733        masm.as_and(dest, lhs, ToRegister(rhs));
    734      }
    735      break;
    736    default:
    737      MOZ_CRASH("unexpected binary opcode");
    738  }
    739 }
    740 
// Lower 64-bit bitwise or/xor/and. Constant right-hand sides are first
// materialized into a scratch register (the scratch scope must outlive the
// switch below, so it is opened here).
void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
  Register lhs = ToRegister64(lir->lhs()).reg;
  Register rhs;
  Register dest = ToOutRegister64(lir).reg;

  UseScratchRegisterScope temps(masm);
  if (IsConstant(lir->rhs())) {
    rhs = temps.Acquire();

    // Small immediates can be handled without the load immediate instruction,
    // but this optimisation isn't yet implemented.
    masm.ma_li(rhs, ImmWord(ToInt64(lir->rhs())));
  } else {
    rhs = ToRegister64(lir->rhs()).reg;
  }

  switch (lir->bitop()) {
    case JSOp::BitOr:
      masm.as_or(dest, lhs, rhs);
      break;
    case JSOp::BitXor:
      masm.as_xor(dest, lhs, rhs);
      break;
    case JSOp::BitAnd:
      masm.as_and(dest, lhs, rhs);
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}
    771 
// Lower 32-bit shifts (<<, >>, >>>). Constant shift counts are masked to
// 0..31 per JS semantics. A fallible unsigned shift (x >>> y) bails out
// when the result would not fit in an int32 (i.e. is negative as int32).
void CodeGenerator::visitShiftI(LShiftI* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.ma_sll(dest, lhs, Imm32(shift));
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.ma_sra(dest, lhs, Imm32(shift));
        } else {
          masm.move32(lhs, dest);
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.ma_srl(dest, lhs, Imm32(shift));
        } else {
          // x >>> 0 can overflow.
          if (ins->mir()->toUrsh()->fallible()) {
            bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
          }
          masm.move32(lhs, dest);
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    // Variable shift count; ma_* helpers handle the count masking.
    Register shift = ToRegister(rhs);
    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.ma_sll(dest, lhs, shift);
        break;
      case JSOp::Rsh:
        masm.ma_sra(dest, lhs, shift);
        break;
      case JSOp::Ursh:
        masm.ma_srl(dest, lhs, shift);
        if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}
    829 
// Lower pointer-sized shifts. Constant shift counts are masked to 0..63;
// these shifts are infallible (no bailout paths).
void CodeGenerator::visitShiftIntPtr(LShiftIntPtr* ins) {
  Register lhs = ToRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  Register dest = ToRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToIntPtr(rhs) & 0x3F;
    if (shift) {
      switch (ins->bitop()) {
        case JSOp::Lsh:
          masm.ma_dsll(dest, lhs, Imm32(shift));
          break;
        case JSOp::Rsh:
          masm.ma_dsra(dest, lhs, Imm32(shift));
          break;
        case JSOp::Ursh:
          masm.ma_dsrl(dest, lhs, Imm32(shift));
          break;
        default:
          MOZ_CRASH("Unexpected shift op");
      }
    } else if (lhs != dest) {
      // Shift by zero: just copy (skipped when already in place).
      masm.movePtr(lhs, dest);
    }
  } else {
    Register shift = ToRegister(rhs);
    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.ma_dsll(dest, lhs, shift);
        break;
      case JSOp::Rsh:
        masm.ma_dsra(dest, lhs, shift);
        break;
      case JSOp::Ursh:
        masm.ma_dsrl(dest, lhs, shift);
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}
    871 
// Lower 64-bit shifts. Constant shift counts are masked to 0..63; these
// shifts are infallible (no bailout paths).
void CodeGenerator::visitShiftI64(LShiftI64* lir) {
  Register lhs = ToRegister64(lir->lhs()).reg;
  const LAllocation* rhs = lir->rhs();
  Register dest = ToOutRegister64(lir).reg;

  if (rhs->isConstant()) {
    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
    if (shift) {
      switch (lir->bitop()) {
        case JSOp::Lsh:
          masm.ma_dsll(dest, lhs, Imm32(shift));
          break;
        case JSOp::Rsh:
          masm.ma_dsra(dest, lhs, Imm32(shift));
          break;
        case JSOp::Ursh:
          masm.ma_dsrl(dest, lhs, Imm32(shift));
          break;
        default:
          MOZ_CRASH("Unexpected shift op");
      }
    } else if (lhs != dest) {
      // Shift by zero: just copy (skipped when already in place).
      masm.movePtr(lhs, dest);
    }
    return;
  }

  Register shift = ToRegister(rhs);
  switch (lir->bitop()) {
    case JSOp::Lsh:
      masm.ma_dsll(dest, lhs, shift);
      break;
    case JSOp::Rsh:
      masm.ma_dsra(dest, lhs, shift);
      break;
    case JSOp::Ursh:
      masm.ma_dsrl(dest, lhs, shift);
      break;
    default:
      MOZ_CRASH("Unexpected shift op");
  }
}
    914 
    915 void CodeGenerator::visitUrshD(LUrshD* ins) {
    916  Register lhs = ToRegister(ins->lhs());
    917  Register temp = ToRegister(ins->temp0());
    918 
    919  const LAllocation* rhs = ins->rhs();
    920  FloatRegister out = ToFloatRegister(ins->output());
    921 
    922  if (rhs->isConstant()) {
    923    masm.ma_srl(temp, lhs, Imm32(ToInt32(rhs)));
    924  } else {
    925    masm.ma_srl(temp, lhs, ToRegister(rhs));
    926  }
    927 
    928  masm.convertUInt32ToDouble(temp, out);
    929 }
    930 
// Specialized Math.pow(x, 0.5): sqrt plus the two IEEE corner cases where
// plain sqrt would give the wrong pow() answer.
void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());

  Label done, skip;

  // Math.pow(-Infinity, 0.5) == Infinity, but sqrt(-Infinity) is NaN.
  masm.loadConstantDouble(NegativeInfinity<double>(), ScratchDoubleReg);
  masm.ma_bc1d(input, ScratchDoubleReg, &skip,
               Assembler::DoubleNotEqualOrUnordered, ShortJump);
  // input == -Infinity: negate the scratch (-Infinity) to produce +Infinity.
  masm.as_negd(output, ScratchDoubleReg);
  masm.ma_b(&done, ShortJump);

  masm.bind(&skip);
  // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
  // Adding 0 converts any -0 to 0.
  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  masm.as_addd(output, input, ScratchDoubleReg);
  masm.as_sqrtd(output, output);

  masm.bind(&done);
}
    953 
    954 MoveOperand CodeGeneratorMIPSShared::toMoveOperand(LAllocation a) const {
    955  if (a.isGeneralReg()) {
    956    return MoveOperand(ToRegister(a));
    957  }
    958  if (a.isFloatReg()) {
    959    return MoveOperand(ToFloatRegister(a));
    960  }
    961  MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
    962                                           : MoveOperand::Kind::Memory;
    963  Address address = ToAddress(a);
    964  MOZ_ASSERT((address.offset & 3) == 0);
    965  return MoveOperand(address, kind);
    966 }
    967 
    968 void CodeGenerator::visitMathD(LMathD* math) {
    969  FloatRegister src1 = ToFloatRegister(math->lhs());
    970  FloatRegister src2 = ToFloatRegister(math->rhs());
    971  FloatRegister output = ToFloatRegister(math->output());
    972 
    973  switch (math->jsop()) {
    974    case JSOp::Add:
    975      masm.as_addd(output, src1, src2);
    976      break;
    977    case JSOp::Sub:
    978      masm.as_subd(output, src1, src2);
    979      break;
    980    case JSOp::Mul:
    981      masm.as_muld(output, src1, src2);
    982      break;
    983    case JSOp::Div:
    984      masm.as_divd(output, src1, src2);
    985      break;
    986    default:
    987      MOZ_CRASH("unexpected opcode");
    988  }
    989 }
    990 
    991 void CodeGenerator::visitMathF(LMathF* math) {
    992  FloatRegister src1 = ToFloatRegister(math->lhs());
    993  FloatRegister src2 = ToFloatRegister(math->rhs());
    994  FloatRegister output = ToFloatRegister(math->output());
    995 
    996  switch (math->jsop()) {
    997    case JSOp::Add:
    998      masm.as_adds(output, src1, src2);
    999      break;
   1000    case JSOp::Sub:
   1001      masm.as_subs(output, src1, src2);
   1002      break;
   1003    case JSOp::Mul:
   1004      masm.as_muls(output, src1, src2);
   1005      break;
   1006    case JSOp::Div:
   1007      masm.as_divs(output, src1, src2);
   1008      break;
   1009    default:
   1010      MOZ_CRASH("unexpected opcode");
   1011  }
   1012 }
   1013 
   1014 void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
   1015  emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
   1016                     ins->mir());
   1017 }
   1018 
   1019 void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
   1020  emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
   1021                      ins->mir());
   1022 }
   1023 
   1024 void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
   1025    LWasmBuiltinTruncateDToInt32* lir) {
   1026  emitTruncateDouble(ToFloatRegister(lir->input()), ToRegister(lir->output()),
   1027                     lir->mir());
   1028 }
   1029 
   1030 void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
   1031    LWasmBuiltinTruncateFToInt32* lir) {
   1032  emitTruncateFloat32(ToFloatRegister(lir->input()), ToRegister(lir->output()),
   1033                      lir->mir());
   1034 }
   1035 
// Wasm truncation of a double/float32 to (u)int32. Inline fast path with an
// out-of-line check that either traps or saturates (per mir->isSaturating())
// when the input is out of range.
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
  auto input = ToFloatRegister(lir->input());
  auto output = ToRegister(lir->output());

  MWasmTruncateToInt32* mir = lir->mir();
  MIRType fromType = mir->input()->type();

  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);

  // OOL path entered when the inline truncation fails.
  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
  addOutOfLineCode(ool, mir);

  Label* oolEntry = ool->entry();
  if (mir->isUnsigned()) {
    // Unsigned result: dispatch on the source floating-point type.
    if (fromType == MIRType::Double) {
      masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
                                      oolEntry);
    } else if (fromType == MIRType::Float32) {
      masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
                                       oolEntry);
    } else {
      MOZ_CRASH("unexpected type");
    }

    masm.bind(ool->rejoin());
    return;
  }

  // Signed result.
  if (fromType == MIRType::Double) {
    masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
                                   oolEntry);
  } else if (fromType == MIRType::Float32) {
    masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
                                    oolEntry);
  } else {
    MOZ_CRASH("unexpected type");
  }

  masm.bind(ool->rejoin());
}
   1076 
// Out-of-line handler for failed wasm truncations: delegates to the
// macro-assembler's range-check/trap/saturate code for the target width.
void CodeGeneratorMIPSShared::visitOutOfLineWasmTruncateCheck(
    OutOfLineWasmTruncateCheck* ool) {
  if (ool->toType() == MIRType::Int32) {
    masm.outOfLineWasmTruncateToInt32Check(ool->input(), ool->output(),
                                           ool->fromType(), ool->flags(),
                                           ool->rejoin(), ool->trapSiteDesc());
  } else {
    // Only Int32 and Int64 targets exist.
    MOZ_ASSERT(ool->toType() == MIRType::Int64);
    masm.outOfLineWasmTruncateToInt64Check(ool->input(), ool->output64(),
                                           ool->fromType(), ool->flags(),
                                           ool->rejoin(), ool->trapSiteDesc());
  }
}
   1090 
// Branch on the truthiness of a double: 0.0, -0.0 and NaN are falsy.
void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
  FloatRegister input = ToFloatRegister(test->input());

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantDouble(0.0, ScratchDoubleReg);
  // If 0, or NaN, the result is false.

  if (isNextBlock(ifFalse->lir())) {
    // False target falls through; only branch when the value is truthy.
    branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    // Branch away on falsy (== 0 or unordered), then jump to the true block.
    branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}
   1109 
// Branch on the truthiness of a float32: 0.0, -0.0 and NaN are falsy.
void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
  FloatRegister input = ToFloatRegister(test->input());

  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
  // If 0, or NaN, the result is false.

  if (isNextBlock(ifFalse->lir())) {
    // False target falls through; only branch when the value is truthy.
    branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifTrue,
                  Assembler::DoubleNotEqual);
  } else {
    // Branch away on falsy (== 0 or unordered), then jump to the true block.
    branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifFalse,
                  Assembler::DoubleEqualOrUnordered);
    jumpToBlock(ifTrue);
  }
}
   1128 
   1129 void CodeGenerator::visitCompareD(LCompareD* comp) {
   1130  FloatRegister lhs = ToFloatRegister(comp->left());
   1131  FloatRegister rhs = ToFloatRegister(comp->right());
   1132  Register dest = ToRegister(comp->output());
   1133 
   1134  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
   1135  masm.ma_cmp_set_double(dest, lhs, rhs, cond);
   1136 }
   1137 
   1138 void CodeGenerator::visitCompareF(LCompareF* comp) {
   1139  FloatRegister lhs = ToFloatRegister(comp->left());
   1140  FloatRegister rhs = ToFloatRegister(comp->right());
   1141  Register dest = ToRegister(comp->output());
   1142 
   1143  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
   1144  masm.ma_cmp_set_float32(dest, lhs, rhs, cond);
   1145 }
   1146 
// Fused double compare-and-branch.
void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  if (isNextBlock(ifFalse->lir())) {
    // False target falls through: emit a single branch on the condition.
    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
  } else {
    // Branch to the false block on the inverted condition, then jump to true.
    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}
   1164 
// Fused float32 compare-and-branch.
void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());

  Assembler::DoubleCondition cond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  MBasicBlock* ifTrue = comp->ifTrue();
  MBasicBlock* ifFalse = comp->ifFalse();

  if (isNextBlock(ifFalse->lir())) {
    // False target falls through: emit a single branch on the condition.
    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
  } else {
    // Branch to the false block on the inverted condition, then jump to true.
    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
                  Assembler::InvertCondition(cond));
    jumpToBlock(ifTrue);
  }
}
   1182 
   1183 void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
   1184  masm.convertUInt32ToDouble(ToRegister(lir->input()),
   1185                             ToFloatRegister(lir->output()));
   1186 }
   1187 
   1188 void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
   1189  masm.convertUInt32ToFloat32(ToRegister(lir->input()),
   1190                              ToFloatRegister(lir->output()));
   1191 }
   1192 
   1193 void CodeGenerator::visitNotD(LNotD* ins) {
   1194  // Since this operation is not, we want to set a bit if
   1195  // the double is falsey, which means 0.0, -0.0 or NaN.
   1196  FloatRegister in = ToFloatRegister(ins->input());
   1197  Register dest = ToRegister(ins->output());
   1198 
   1199  masm.loadConstantDouble(0.0, ScratchDoubleReg);
   1200  masm.ma_cmp_set_double(dest, in, ScratchDoubleReg,
   1201                         Assembler::DoubleEqualOrUnordered);
   1202 }
   1203 
   1204 void CodeGenerator::visitNotF(LNotF* ins) {
   1205  // Since this operation is not, we want to set a bit if
   1206  // the float32 is falsey, which means 0.0, -0.0 or NaN.
   1207  FloatRegister in = ToFloatRegister(ins->input());
   1208  Register dest = ToRegister(ins->output());
   1209 
   1210  masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
   1211  masm.ma_cmp_set_float32(dest, in, ScratchFloat32Reg,
   1212                          Assembler::DoubleEqualOrUnordered);
   1213 }
   1214 
// Emit the invalidation epilogue: padding so OsiPoint patching cannot
// overwrite it, then a tail that hands control to the invalidator thunk.
void CodeGeneratorMIPSShared::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint
  // patching to occur. Otherwise, we could overwrite the invalidation
  // epilogue.
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }

  masm.bind(&invalidate_);

  // Push the return address of the point that we bailed out at to the stack
  masm.Push(ra);

  // Push the Ion script onto the stack (when we determine what that
  // pointer is).
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
  masm.jump(thunk);
}
   1236 
// Out-of-line jump table for a table switch: the table of absolute case
// addresses is emitted after the main code stream (see
// visitOutOfLineTableSwitch) and is located through jumpLabel_.
class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorMIPSShared> {
  MTableSwitch* mir_;
  // Marks the start of the emitted jump table.
  CodeLabel jumpLabel_;

  void accept(CodeGeneratorMIPSShared* codegen) {
    codegen->visitOutOfLineTableSwitch(this);
  }

 public:
  explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}

  MTableSwitch* mir() const { return mir_; }

  CodeLabel* jumpLabel() { return &jumpLabel_; }
};
   1253 
// Emit the jump table itself: one code pointer per (non-trivial) case,
// aligned so the table can be indexed by pointer size.
void CodeGeneratorMIPSShared::visitOutOfLineTableSwitch(
    OutOfLineTableSwitch* ool) {
  MTableSwitch* mir = ool->mir();

  masm.haltingAlign(sizeof(void*));
  masm.bind(ool->jumpLabel());
  masm.addCodeLabel(*ool->jumpLabel());

  for (size_t i = 0; i < mir->numCases(); i++) {
    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
    Label* caseheader = caseblock->label();
    uint32_t caseoffset = caseheader->offset();

    // The entries of the jump table need to be absolute addresses and thus
    // must be patched after codegen is finished.
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    cl.target()->bind(caseoffset);
    masm.addCodeLabel(cl);
  }
}
   1275 
// Emit the dispatch sequence for a table switch: normalize the index,
// range-check it against the number of cases, then jump through the
// out-of-line table of case addresses.
void CodeGeneratorMIPSShared::emitTableSwitchDispatch(MTableSwitch* mir,
                                                      Register index,
                                                      Register base) {
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  // Lower value with low value
  if (mir->low() != 0) {
    masm.subPtr(Imm32(mir->low()), index);
  }

  // Jump to default case if input is out of range
  int32_t cases = mir->numCases();
  masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);

  // To fill in the CodeLabels for the case entries, we need to first
  // generate the case entries (we don't yet know their offsets in the
  // instruction stream).
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
  addOutOfLineCode(ool, mir);

  // Compute the position where a pointer to the right case stands.
  masm.ma_li(base, ool->jumpLabel());

  BaseIndex pointer(base, index, ScalePointer);

  // Jump to the right case
  masm.branchToComputedAddress(pointer);
}
   1304 
// Shared lowering for LWasmLoad and LWasmUnalignedLoad. The unaligned
// variant carries an extra temp and uses the dedicated unaligned-access
// macro-assembler helpers.
template <typename T>
void CodeGeneratorMIPSShared::emitWasmLoad(T* lir) {
  const MWasmLoad* mir = lir->mir();
  UseScratchRegisterScope temps(masm);
  Register scratch2 = temps.Acquire();

  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptr = ToRegister(lir->ptr());
  Register ptrScratch = ToTempRegisterOrInvalid(lir->temp0());

  // A 32-bit index is zero-extended into a scratch register before use in
  // full-width address arithmetic.
  if (mir->base()->type() == MIRType::Int32) {
    masm.move32To64ZeroExtend(ptr, Register64(scratch2));
    ptr = scratch2;
    ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
  }

  if constexpr (std::is_same_v<T, LWasmUnalignedLoad>) {
    MOZ_ASSERT(IsUnaligned(mir->access()));
    // FP and integer results take different unaligned-load helpers.
    if (IsFloatingPointType(mir->type())) {
      masm.wasmUnalignedLoadFP(mir->access(), memoryBase, ptr, ptrScratch,
                               ToFloatRegister(lir->output()),
                               ToRegister(lir->temp1()));
    } else {
      masm.wasmUnalignedLoad(mir->access(), memoryBase, ptr, ptrScratch,
                             ToRegister(lir->output()),
                             ToRegister(lir->temp1()));
    }
  } else {
    MOZ_ASSERT(!IsUnaligned(mir->access()));
    masm.wasmLoad(mir->access(), memoryBase, ptr, ptrScratch,
                  ToAnyRegister(lir->output()));
  }
}
   1338 
   1339 void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
   1340 
   1341 void CodeGenerator::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir) {
   1342  emitWasmLoad(lir);
   1343 }
   1344 
// Shared lowering for LWasmStore and LWasmUnalignedStore. Mirrors
// emitWasmLoad: the unaligned variant carries an extra temp and uses the
// dedicated unaligned-access macro-assembler helpers.
template <typename T>
void CodeGeneratorMIPSShared::emitWasmStore(T* lir) {
  const MWasmStore* mir = lir->mir();
  UseScratchRegisterScope temps(masm);
  Register scratch2 = temps.Acquire();

  Register memoryBase = ToRegister(lir->memoryBase());
  Register ptr = ToRegister(lir->ptr());
  Register ptrScratch = ToTempRegisterOrInvalid(lir->temp0());

  // A 32-bit index is zero-extended into a scratch register before use in
  // full-width address arithmetic.
  if (mir->base()->type() == MIRType::Int32) {
    masm.move32To64ZeroExtend(ptr, Register64(scratch2));
    ptr = scratch2;
    ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
  }

  if constexpr (std::is_same_v<T, LWasmUnalignedStore>) {
    MOZ_ASSERT(IsUnaligned(mir->access()));
    // FP and integer values take different unaligned-store helpers.
    if (mir->access().type() == Scalar::Float32 ||
        mir->access().type() == Scalar::Float64) {
      masm.wasmUnalignedStoreFP(mir->access(), ToFloatRegister(lir->value()),
                                memoryBase, ptr, ptrScratch,
                                ToRegister(lir->temp1()));
    } else {
      masm.wasmUnalignedStore(mir->access(), ToRegister(lir->value()),
                              memoryBase, ptr, ptrScratch,
                              ToRegister(lir->temp1()));
    }
  } else {
    MOZ_ASSERT(!IsUnaligned(mir->access()));
    masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), memoryBase, ptr,
                   ptrScratch);
  }
}
   1379 
   1380 void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
   1381 
   1382 void CodeGenerator::visitWasmUnalignedStore(LWasmUnalignedStore* lir) {
   1383  emitWasmStore(lir);
   1384 }
   1385 
// asm.js heap load. Unlike wasm, an out-of-bounds asm.js load does not trap:
// it yields NaN for floating-point accesses and 0 for integer accesses.
// Three code paths: constant pointer (bounds check proven away), register
// pointer without a bounds check, and register pointer with an inline check.
void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  const MAsmJSLoadHeap* mir = ins->mir();
  const LAllocation* ptr = ins->ptr();
  const LDefinition* out = ins->output();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  Scalar::Type accessType = mir->access().type();
  bool isSigned = Scalar::isSignedIntType(accessType);
  int size = Scalar::byteSize(accessType) * 8;  // access width in bits
  bool isFloat = Scalar::isFloatingType(accessType);

  // Constant-pointer fast path: the offset was proven in range.
  if (ptr->isConstant()) {
    MOZ_ASSERT(!mir->needsBoundsCheck());
    int32_t ptrImm = ptr->toConstant()->toInt32();
    MOZ_ASSERT(ptrImm >= 0);
    if (isFloat) {
      if (size == 32) {
        masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
      } else {
        masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
      }
    } else {
      masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
                   static_cast<LoadStoreSize>(size),
                   isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  Register ptrReg = ToRegister(ptr);

  // Register pointer, no bounds check required.
  if (!mir->needsBoundsCheck()) {
    if (isFloat) {
      if (size == 32) {
        masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
                         ToFloatRegister(out));
      } else {
        masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
                        ToFloatRegister(out));
      }
    } else {
      masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                   static_cast<LoadStoreSize>(size),
                   isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  // Register pointer with an inline bounds check.
  Label done, outOfRange;
  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
                         ToRegister(boundsCheckLimit), &outOfRange);
  // Offset is ok, let's load value.
  if (isFloat) {
    if (size == 32) {
      masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
                       ToFloatRegister(out));
    } else {
      masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
                      ToFloatRegister(out));
    }
  } else {
    masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                 static_cast<LoadStoreSize>(size),
                 isSigned ? SignExtend : ZeroExtend);
  }
  masm.ma_b(&done, ShortJump);
  masm.bind(&outOfRange);
  // Offset is out of range. Load default values.
  if (isFloat) {
    if (size == 32) {
      masm.loadConstantFloat32(float(GenericNaN()), ToFloatRegister(out));
    } else {
      masm.loadConstantDouble(GenericNaN(), ToFloatRegister(out));
    }
  } else {
    masm.move32(Imm32(0), ToRegister(out));
  }
  masm.bind(&done);
}
   1465 
   1466 void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
   1467  const MAsmJSStoreHeap* mir = ins->mir();
   1468  const LAllocation* value = ins->value();
   1469  const LAllocation* ptr = ins->ptr();
   1470  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
   1471 
   1472  Scalar::Type accessType = mir->access().type();
   1473  bool isSigned = Scalar::isSignedIntType(accessType);
   1474  int size = Scalar::byteSize(accessType) * 8;
   1475  bool isFloat = Scalar::isFloatingType(accessType);
   1476 
   1477  if (ptr->isConstant()) {
   1478    MOZ_ASSERT(!mir->needsBoundsCheck());
   1479    int32_t ptrImm = ptr->toConstant()->toInt32();
   1480    MOZ_ASSERT(ptrImm >= 0);
   1481 
   1482    if (isFloat) {
   1483      FloatRegister freg = ToFloatRegister(value);
   1484      Address addr(HeapReg, ptrImm);
   1485      if (size == 32) {
   1486        masm.storeFloat32(freg, addr);
   1487      } else {
   1488        masm.storeDouble(freg, addr);
   1489      }
   1490    } else {
   1491      masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
   1492                    static_cast<LoadStoreSize>(size),
   1493                    isSigned ? SignExtend : ZeroExtend);
   1494    }
   1495    return;
   1496  }
   1497 
   1498  Register ptrReg = ToRegister(ptr);
   1499  Address dstAddr(ptrReg, 0);
   1500 
   1501  if (!mir->needsBoundsCheck()) {
   1502    if (isFloat) {
   1503      FloatRegister freg = ToFloatRegister(value);
   1504      BaseIndex bi(HeapReg, ptrReg, TimesOne);
   1505      if (size == 32) {
   1506        masm.storeFloat32(freg, bi);
   1507      } else {
   1508        masm.storeDouble(freg, bi);
   1509      }
   1510    } else {
   1511      masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
   1512                    static_cast<LoadStoreSize>(size),
   1513                    isSigned ? SignExtend : ZeroExtend);
   1514    }
   1515    return;
   1516  }
   1517 
   1518  Label outOfRange;
   1519  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
   1520                         ToRegister(boundsCheckLimit), &outOfRange);
   1521 
   1522  // Offset is ok, let's store value.
   1523  if (isFloat) {
   1524    if (size == 32) {
   1525      masm.storeFloat32(ToFloatRegister(value),
   1526                        BaseIndex(HeapReg, ptrReg, TimesOne));
   1527    } else
   1528      masm.storeDouble(ToFloatRegister(value),
   1529                       BaseIndex(HeapReg, ptrReg, TimesOne));
   1530  } else {
   1531    masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
   1532                  static_cast<LoadStoreSize>(size),
   1533                  isSigned ? SignExtend : ZeroExtend);
   1534  }
   1535 
   1536  masm.bind(&outOfRange);
   1537 }
   1538 
// Wasm atomic compare-exchange on linear memory; the temps support
// sub-word accesses (value/offset/mask manipulation within a word).
void CodeGenerator::visitWasmCompareExchangeHeap(
    LWasmCompareExchangeHeap* ins) {
  MWasmCompareExchangeHeap* mir = ins->mir();
  Register memoryBase = ToRegister(ins->memoryBase());
  Register ptrReg = ToRegister(ins->ptr());
  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());

  Register oldval = ToRegister(ins->oldValue());
  Register newval = ToRegister(ins->newValue());
  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());

  masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
                           offsetTemp, maskTemp, ToRegister(ins->output()));
}
   1555 
// Wasm atomic exchange on linear memory; returns the previous value.
void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
  MWasmAtomicExchangeHeap* mir = ins->mir();
  Register memoryBase = ToRegister(ins->memoryBase());
  Register ptrReg = ToRegister(ins->ptr());
  Register value = ToRegister(ins->value());
  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());

  // Temps support sub-word accesses.
  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());

  masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
                          maskTemp, ToRegister(ins->output()));
}
   1570 
// Wasm atomic read-modify-write whose old value is used: fetch-op form.
void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
  // The for-effect variant handles the no-uses case.
  MOZ_ASSERT(ins->mir()->hasUses());

  MWasmAtomicBinopHeap* mir = ins->mir();
  Register memoryBase = ToRegister(ins->memoryBase());
  Register ptrReg = ToRegister(ins->ptr());
  // Temps support sub-word accesses.
  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());

  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());

  masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
                         ToRegister(ins->value()), srcAddr, valueTemp,
                         offsetTemp, maskTemp, ToRegister(ins->output()));
}
   1587 
// Wasm atomic read-modify-write whose old value is discarded: effect-only
// form, which avoids producing an output register.
void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
    LWasmAtomicBinopHeapForEffect* ins) {
  MOZ_ASSERT(!ins->mir()->hasUses());

  MWasmAtomicBinopHeap* mir = ins->mir();
  Register memoryBase = ToRegister(ins->memoryBase());
  Register ptrReg = ToRegister(ins->ptr());
  // Temps support sub-word accesses.
  Register valueTemp = ToTempRegisterOrInvalid(ins->temp0());
  Register offsetTemp = ToTempRegisterOrInvalid(ins->temp1());
  Register maskTemp = ToTempRegisterOrInvalid(ins->temp2());

  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset32());
  masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
                          ToRegister(ins->value()), srcAddr, valueTemp,
                          offsetTemp, maskTemp);
}
   1604 
   1605 void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
   1606  const MWasmStackArg* mir = ins->mir();
   1607  if (ins->arg()->isConstant()) {
   1608    masm.storePtr(ImmWord(ToInt32(ins->arg())),
   1609                  Address(StackPointer, mir->spOffset()));
   1610  } else {
   1611    if (ins->arg()->isGeneralReg()) {
   1612      masm.storePtr(ToRegister(ins->arg()),
   1613                    Address(StackPointer, mir->spOffset()));
   1614    } else if (mir->input()->type() == MIRType::Double) {
   1615      masm.storeDouble(ToFloatRegister(ins->arg()).doubleOverlay(),
   1616                       Address(StackPointer, mir->spOffset()));
   1617    } else {
   1618      masm.storeFloat32(ToFloatRegister(ins->arg()),
   1619                        Address(StackPointer, mir->spOffset()));
   1620    }
   1621  }
   1622 }
   1623 
   1624 void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
   1625  const MWasmStackArg* mir = ins->mir();
   1626  Address dst(StackPointer, mir->spOffset());
   1627  if (IsConstant(ins->arg())) {
   1628    masm.store64(Imm64(ToInt64(ins->arg())), dst);
   1629  } else {
   1630    masm.store64(ToRegister64(ins->arg()), dst);
   1631  }
   1632 }
   1633 
// Wasm select: output starts as the true value (the register allocator
// reuses the true input as the output) and is conditionally overwritten
// with the false value when the condition is zero.
void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
  MIRType mirType = ins->mir()->type();

  Register cond = ToRegister(ins->condExpr());
  const LAllocation* falseExpr = ins->falseExpr();

  if (mirType == MIRType::Int32 || mirType == MIRType::WasmAnyRef) {
    Register out = ToRegister(ins->output());
    MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
               "true expr input is reused for output");
    if (falseExpr->isGeneralReg()) {
      // movz: move falseExpr into out when cond == 0.
      masm.as_movz(out, ToRegister(falseExpr), cond);
    } else {
      // False value lives in memory: conditionally load it over out.
      masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
    }
    return;
  }

  // Floating-point select.
  FloatRegister out = ToFloatRegister(ins->output());
  MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
             "true expr input is reused for output");

  if (falseExpr->isFloatReg()) {
    // FP movz: move falseExpr into out when cond == 0.
    if (mirType == MIRType::Float32) {
      masm.as_movz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr),
                   cond);
    } else if (mirType == MIRType::Double) {
      masm.as_movz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr),
                   cond);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }
  } else {
    // False value lives in memory: skip the load when cond is non-zero.
    Label done;
    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);

    if (mirType == MIRType::Float32) {
      masm.loadFloat32(ToAddress(falseExpr), out);
    } else if (mirType == MIRType::Double) {
      masm.loadDouble(ToAddress(falseExpr), out);
    } else {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }

    masm.bind(&done);
  }
}
   1681 
   1682 // We expect to handle only the case where compare is {U,}Int32 and select is
   1683 // {U,}Int32, and the "true" input is reused for the output.
   1684 void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
   1685  bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
   1686                    ins->compareType() == MCompare::Compare_UInt32;
   1687  bool selIs32bit = ins->mir()->type() == MIRType::Int32;
   1688 
   1689  MOZ_RELEASE_ASSERT(
   1690      cmpIs32bit && selIs32bit,
   1691      "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
   1692 
   1693  Register trueExprAndDest = ToRegister(ins->output());
   1694  MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
   1695             "true expr input is reused for output");
   1696 
   1697  Assembler::Condition cond = Assembler::InvertCondition(
   1698      JSOpToCondition(ins->compareType(), ins->jsop()));
   1699  const LAllocation* rhs = ins->rightExpr();
   1700  const LAllocation* falseExpr = ins->ifFalseExpr();
   1701  Register lhs = ToRegister(ins->leftExpr());
   1702 
   1703  masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
   1704                   trueExprAndDest);
   1705 }
   1706 
void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
  // Unsigned 32-bit division/modulus.  On pre-R6 MIPS one divu produces both
  // quotient (LO) and remainder (HI); on R6 separate divu/modu instructions
  // are used.  Div and mod share this path.
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  Label done;

  // Prevent divide by zero.
  if (ins->canBeDivideByZero()) {
    if (ins->mir()->isTruncated()) {
      if (ins->trapOnError()) {
        // Wasm semantics: division by zero traps.
        Label nonZero;
        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->trapSiteDesc());
        masm.bind(&nonZero);
      } else {
        // JS truncated semantics: Infinity|0 == 0
        Label notzero;
        masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
        masm.move32(Imm32(0), output);
        masm.ma_b(&done, ShortJump);
        masm.bind(&notzero);
      }
    } else {
      // A non-truncated division by zero has no Int32 result: bail out.
      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
    }
  }

  // Compute the remainder first (modu on R6; divu then mfhi on pre-R6).
#ifdef MIPSR6
  masm.as_modu(output, lhs, rhs);
#else
  masm.as_divu(lhs, rhs);
  masm.as_mfhi(output);
#endif

  // If the remainder is > 0, bailout since this must be a double.
  if (ins->mir()->isDiv()) {
    if (!ins->mir()->toDiv()->canTruncateRemainder()) {
      bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
    }
    // Get quotient
#ifdef MIPSR6
    masm.as_divu(output, lhs, rhs);
#else
    masm.as_mflo(output);
#endif
  }

  if (!ins->mir()->isTruncated()) {
    // An unsigned result with the high bit set does not fit in Int32;
    // bail out so it can be represented as a double.
    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
  }

  masm.bind(&done);
}
   1760 
   1761 void CodeGenerator::visitEffectiveAddress3(LEffectiveAddress3* ins) {
   1762  const MEffectiveAddress3* mir = ins->mir();
   1763  Register base = ToRegister(ins->base());
   1764  Register index = ToRegister(ins->index());
   1765  Register output = ToRegister(ins->output());
   1766 
   1767  BaseIndex address(base, index, mir->scale(), mir->displacement());
   1768  masm.computeEffectiveAddress32(address, output);
   1769 }
   1770 
   1771 void CodeGenerator::visitEffectiveAddress2(LEffectiveAddress2* ins) {
   1772  const MEffectiveAddress2* mir = ins->mir();
   1773  Register index = ToRegister(ins->index());
   1774  Register output = ToRegister(ins->output());
   1775 
   1776  BaseIndex address(zero, index, mir->scale(), mir->displacement());
   1777  masm.computeEffectiveAddress32(address, output);
   1778 }
   1779 
   1780 void CodeGenerator::visitNegI(LNegI* ins) {
   1781  Register input = ToRegister(ins->input());
   1782  Register output = ToRegister(ins->output());
   1783 
   1784  masm.ma_negu(output, input);
   1785 }
   1786 
   1787 void CodeGenerator::visitNegI64(LNegI64* ins) {
   1788  Register input = ToRegister64(ins->input()).reg;
   1789  Register output = ToOutRegister64(ins).reg;
   1790 
   1791  masm.ma_dnegu(output, input);
   1792 }
   1793 
   1794 void CodeGenerator::visitNegD(LNegD* ins) {
   1795  FloatRegister input = ToFloatRegister(ins->input());
   1796  FloatRegister output = ToFloatRegister(ins->output());
   1797 
   1798  masm.as_negd(output, input);
   1799 }
   1800 
   1801 void CodeGenerator::visitNegF(LNegF* ins) {
   1802  FloatRegister input = ToFloatRegister(ins->input());
   1803  FloatRegister output = ToFloatRegister(ins->output());
   1804 
   1805  masm.as_negs(output, input);
   1806 }
   1807 
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
  // Add a constant wasm memory offset to a 32-bit base pointer, trapping
  // with OutOfBounds when the addition carries (wraps past 2^32).
  MWasmAddOffset* mir = lir->mir();
  Register base = ToRegister(lir->base());
  Register out = ToRegister(lir->output());

  Label ok;
  // Falls through to the trap when the carry is set.
  masm.ma_add32TestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
                         &ok);
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  masm.bind(&ok);
}
   1819 
void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
  // 64-bit variant of visitWasmAddOffset: add a constant offset to a
  // pointer-sized base, trapping with OutOfBounds on carry.
  MWasmAddOffset* mir = lir->mir();
  Register64 base = ToRegister64(lir->base());
  Register64 out = ToOutRegister64(lir);

  Label ok;
  // Falls through to the trap when the carry is set.
  masm.ma_addPtrTestCarry(Assembler::CarryClear, out.reg, base.reg,
                          ImmWord(mir->offset()), &ok);
  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  masm.bind(&ok);
}
   1831 
   1832 void CodeGenerator::visitAtomicTypedArrayElementBinop(
   1833    LAtomicTypedArrayElementBinop* lir) {
   1834  MOZ_ASSERT(!lir->mir()->isForEffect());
   1835 
   1836  AnyRegister output = ToAnyRegister(lir->output());
   1837  Register elements = ToRegister(lir->elements());
   1838  Register outTemp = ToTempRegisterOrInvalid(lir->temp0());
   1839  Register valueTemp = ToTempRegisterOrInvalid(lir->temp1());
   1840  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp2());
   1841  Register maskTemp = ToTempRegisterOrInvalid(lir->temp3());
   1842  Register value = ToRegister(lir->value());
   1843 
   1844  Scalar::Type arrayType = lir->mir()->arrayType();
   1845 
   1846  auto mem = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1847 
   1848  mem.match([&](const auto& mem) {
   1849    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
   1850                         lir->mir()->operation(), value, mem, valueTemp,
   1851                         offsetTemp, maskTemp, outTemp, output);
   1852  });
   1853 }
   1854 
   1855 void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
   1856    LAtomicTypedArrayElementBinopForEffect* lir) {
   1857  MOZ_ASSERT(lir->mir()->isForEffect());
   1858 
   1859  Register elements = ToRegister(lir->elements());
   1860  Register valueTemp = ToTempRegisterOrInvalid(lir->temp0());
   1861  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp1());
   1862  Register maskTemp = ToTempRegisterOrInvalid(lir->temp2());
   1863  Register value = ToRegister(lir->value());
   1864  Scalar::Type arrayType = lir->mir()->arrayType();
   1865 
   1866  auto mem = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1867 
   1868  mem.match([&](const auto& mem) {
   1869    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
   1870                          lir->mir()->operation(), value, mem, valueTemp,
   1871                          offsetTemp, maskTemp);
   1872  });
   1873 }
   1874 
   1875 void CodeGenerator::visitCompareExchangeTypedArrayElement(
   1876    LCompareExchangeTypedArrayElement* lir) {
   1877  Register elements = ToRegister(lir->elements());
   1878  AnyRegister output = ToAnyRegister(lir->output());
   1879  Register outTemp = ToTempRegisterOrInvalid(lir->temp0());
   1880 
   1881  Register oldval = ToRegister(lir->oldval());
   1882  Register newval = ToRegister(lir->newval());
   1883  Register valueTemp = ToTempRegisterOrInvalid(lir->temp1());
   1884  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp2());
   1885  Register maskTemp = ToTempRegisterOrInvalid(lir->temp3());
   1886 
   1887  Scalar::Type arrayType = lir->mir()->arrayType();
   1888 
   1889  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1890 
   1891  dest.match([&](const auto& dest) {
   1892    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
   1893                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
   1894                           output);
   1895  });
   1896 }
   1897 
   1898 void CodeGenerator::visitAtomicExchangeTypedArrayElement(
   1899    LAtomicExchangeTypedArrayElement* lir) {
   1900  Register elements = ToRegister(lir->elements());
   1901  AnyRegister output = ToAnyRegister(lir->output());
   1902  Register outTemp = ToTempRegisterOrInvalid(lir->temp0());
   1903 
   1904  Register value = ToRegister(lir->value());
   1905  Register valueTemp = ToTempRegisterOrInvalid(lir->temp1());
   1906  Register offsetTemp = ToTempRegisterOrInvalid(lir->temp2());
   1907  Register maskTemp = ToTempRegisterOrInvalid(lir->temp3());
   1908 
   1909  Scalar::Type arrayType = lir->mir()->arrayType();
   1910 
   1911  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1912 
   1913  dest.match([&](const auto& dest) {
   1914    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
   1915                          valueTemp, offsetTemp, maskTemp, outTemp, output);
   1916  });
   1917 }
   1918 
   1919 void CodeGenerator::visitCompareExchangeTypedArrayElement64(
   1920    LCompareExchangeTypedArrayElement64* lir) {
   1921  Register elements = ToRegister(lir->elements());
   1922  Register64 oldval = ToRegister64(lir->oldval());
   1923  Register64 newval = ToRegister64(lir->newval());
   1924  Register64 out = ToOutRegister64(lir);
   1925 
   1926  Scalar::Type arrayType = lir->mir()->arrayType();
   1927 
   1928  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1929 
   1930  dest.match([&](const auto& dest) {
   1931    masm.compareExchange64(Synchronization::Full(), dest, oldval, newval, out);
   1932  });
   1933 }
   1934 
   1935 void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
   1936    LAtomicExchangeTypedArrayElement64* lir) {
   1937  Register elements = ToRegister(lir->elements());
   1938  Register64 value = ToRegister64(lir->value());
   1939  Register64 out = ToOutRegister64(lir);
   1940 
   1941  Scalar::Type arrayType = lir->mir()->arrayType();
   1942 
   1943  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1944 
   1945  dest.match([&](const auto& dest) {
   1946    masm.atomicExchange64(Synchronization::Full(), dest, value, out);
   1947  });
   1948 }
   1949 
   1950 void CodeGenerator::visitAtomicTypedArrayElementBinop64(
   1951    LAtomicTypedArrayElementBinop64* lir) {
   1952  MOZ_ASSERT(lir->mir()->hasUses());
   1953 
   1954  Register elements = ToRegister(lir->elements());
   1955  Register64 value = ToRegister64(lir->value());
   1956  Register64 temp = ToRegister64(lir->temp0());
   1957  Register64 out = ToOutRegister64(lir);
   1958 
   1959  Scalar::Type arrayType = lir->mir()->arrayType();
   1960  AtomicOp atomicOp = lir->mir()->operation();
   1961 
   1962  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1963 
   1964  dest.match([&](const auto& dest) {
   1965    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, value, dest, temp,
   1966                         out);
   1967  });
   1968 }
   1969 
   1970 void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
   1971    LAtomicTypedArrayElementBinopForEffect64* lir) {
   1972  MOZ_ASSERT(!lir->mir()->hasUses());
   1973 
   1974  Register elements = ToRegister(lir->elements());
   1975  Register64 value = ToRegister64(lir->value());
   1976  Register64 temp = ToRegister64(lir->temp0());
   1977 
   1978  Scalar::Type arrayType = lir->mir()->arrayType();
   1979  AtomicOp atomicOp = lir->mir()->operation();
   1980 
   1981  auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType);
   1982 
   1983  dest.match([&](const auto& dest) {
   1984    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, value, dest, temp);
   1985  });
   1986 }
   1987 
   1988 void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
   1989  Register memoryBase = ToRegister(lir->memoryBase());
   1990  Register ptr = ToRegister(lir->ptr());
   1991  Register64 oldValue = ToRegister64(lir->oldValue());
   1992  Register64 newValue = ToRegister64(lir->newValue());
   1993  Register64 output = ToOutRegister64(lir);
   1994  uint32_t offset = lir->mir()->access().offset32();
   1995 
   1996  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
   1997  masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
   1998                             output);
   1999 }
   2000 
   2001 void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
   2002  Register memoryBase = ToRegister(lir->memoryBase());
   2003  Register ptr = ToRegister(lir->ptr());
   2004  Register64 value = ToRegister64(lir->value());
   2005  Register64 output = ToOutRegister64(lir);
   2006  uint32_t offset = lir->mir()->access().offset32();
   2007 
   2008  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
   2009  masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
   2010 }
   2011 
   2012 void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
   2013  Register memoryBase = ToRegister(lir->memoryBase());
   2014  Register ptr = ToRegister(lir->ptr());
   2015  Register64 value = ToRegister64(lir->value());
   2016  Register64 output = ToOutRegister64(lir);
   2017  Register64 temp = ToRegister64(lir->temp0());
   2018  uint32_t offset = lir->mir()->access().offset32();
   2019 
   2020  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
   2021 
   2022  masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
   2023                           addr, temp, output);
   2024 }
   2025 
// MIPS has no SIMD support, and the wasm compilers do not emit SIMD
// instructions for this target; all of the handlers below are therefore
// unreachable and simply crash if hit.
void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }

void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmBinarySimd128WithConstant(
    LWasmBinarySimd128WithConstant* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmVariableShiftSimd128(
    LWasmVariableShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmConstantShiftSimd128(
    LWasmConstantShiftSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmSignReplicationSimd128(
    LWasmSignReplicationSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
    LWasmReplaceInt64LaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceAndBranchSimd128(
    LWasmReduceAndBranchSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmReduceSimd128ToInt64(
    LWasmReduceSimd128ToInt64* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
  MOZ_CRASH("No SIMD");
}