tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MacroAssembler-mips-shared.cpp (111535B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/mips-shared/MacroAssembler-mips-shared.h"
      8 
      9 #include "mozilla/EndianUtils.h"
     10 
     11 #include "jsmath.h"
     12 
     13 #include "jit/MacroAssembler.h"
     14 
     15 using namespace js;
     16 using namespace jit;
     17 
// Register-to-register move. Implemented as `or rd, rs, $zero`, the
// canonical MIPS move idiom (MIPS has no dedicated move instruction).
void MacroAssemblerMIPSShared::ma_move(Register rd, Register rs) {
  as_or(rd, rs, zero);
}
     21 
// Load a GC-thing pointer into `dest`. A data relocation is recorded first
// so the GC can find (and later patch) the embedded pointer, and the value
// is emitted in patchable lui/ori form rather than the shortest encoding.
void MacroAssemblerMIPSShared::ma_li(Register dest, ImmGCPtr ptr) {
  writeDataRelocation(ptr);
  asMasm().ma_liPatchable(dest, ImmPtr(ptr.value));
}
     26 
// Load a 32-bit immediate into `dest`, picking the shortest sequence:
// a single addiu/ori/lui when the value allows, otherwise lui + ori.
void MacroAssemblerMIPSShared::ma_li(Register dest, Imm32 imm) {
  if (Imm16::IsInSignedRange(imm.value)) {
    // Fits in a signed 16-bit immediate: addiu from $zero.
    as_addiu(dest, zero, imm.value);
  } else if (Imm16::IsInUnsignedRange(imm.value)) {
    // Fits in an unsigned 16-bit immediate: ori from $zero.
    as_ori(dest, zero, Imm16::Lower(imm).encode());
  } else if (Imm16::Lower(imm).encode() == 0) {
    // Low halfword is zero: a single lui suffices.
    as_lui(dest, Imm16::Upper(imm).encode());
  } else {
    // General case: lui loads the high halfword, ori fills in the low one.
    as_lui(dest, Imm16::Upper(imm).encode());
    as_ori(dest, dest, Imm16::Lower(imm).encode());
  }
}
     39 
// This method generates lui and ori instruction pair that can be modified by
// UpdateLuiOriValue, either during compilation (eg. Assembler::bind), or
// during execution (eg. jit::PatchJump).
void MacroAssemblerMIPSShared::ma_liPatchable(Register dest, Imm32 imm) {
  // Reserve room for both instructions up front so the buffer cannot split
  // them (e.g. with a constant-pool flush); patching assumes they are
  // contiguous.
  m_buffer.ensureSpace(2 * sizeof(uint32_t));
  as_lui(dest, Imm16::Upper(imm).encode());
  as_ori(dest, dest, Imm16::Lower(imm).encode());
}
     48 
// Shifts

// Constant shifts. The immediate is masked to 0..31 to match the 5-bit
// shift-amount field of the sll/srl/sra encodings.
void MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt, Imm32 shift) {
  as_sll(rd, rt, shift.value & 0x1f);
}

// Logical (zero-filling) right shift by a constant.
void MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt, Imm32 shift) {
  as_srl(rd, rt, shift.value & 0x1f);
}

// Arithmetic (sign-filling) right shift by a constant.
void MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt, Imm32 shift) {
  as_sra(rd, rt, shift.value & 0x1f);
}
     60 
// Rotate right by a constant. Uses the R2 `rotr` instruction when available;
// otherwise synthesizes the rotate as (rt >> s) | (rt << (32 - s)).
void MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Imm32 shift) {
  if (hasR2()) {
    as_rotr(rd, rt, shift.value & 0x1f);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    // scratch is written before rd, so the sequence is safe when rd == rt.
    as_srl(scratch, rt, shift.value & 0x1f);
    as_sll(rd, rt, 32 - (shift.value & 0x1f));
    as_or(rd, rd, scratch);
  }
}

// Rotate left by a constant. A left rotate by s equals a right rotate by
// (32 - s), which is what both paths emit.
void MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Imm32 shift) {
  if (hasR2()) {
    as_rotr(rd, rt, 32 - (shift.value & 0x1f));
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    as_srl(scratch, rt, 32 - (shift.value & 0x1f));
    as_sll(rd, rt, shift.value & 0x1f);
    as_or(rd, rd, scratch);
  }
}
     84 
// Variable shifts. The MIPS *v instructions take the shift amount from the
// low 5 bits of `shift` (masking is defined by the ISA).
void MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt,
                                      Register shift) {
  as_sllv(rd, rt, shift);
}

// Variable logical right shift.
void MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt,
                                      Register shift) {
  as_srlv(rd, rt, shift);
}

// Variable arithmetic right shift.
void MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt,
                                      Register shift) {
  as_srav(rd, rt, shift);
}
     99 
// Rotate right by a register amount. Without R2, synthesize as
// (rt >> s) | (rt << -s): since the *v shifts only use the low 5 bits of
// the amount, -s is congruent to (32 - s) mod 32.
void MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt,
                                      Register shift) {
  if (hasR2()) {
    as_rotrv(rd, rt, shift);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_negu(scratch, shift);
    as_sllv(scratch, rt, scratch);
    as_srlv(rd, rt, shift);
    as_or(rd, rd, scratch);
  }
}

// Rotate left by a register amount: a left rotate by s is a right rotate
// by -s (again relying on the 5-bit masking of the shift amount).
void MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt,
                                      Register shift) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  ma_negu(scratch, shift);
  if (hasR2()) {
    as_rotrv(rd, rt, scratch);
  } else {
    as_srlv(scratch, rt, scratch);
    as_sllv(rd, rt, shift);
    as_or(rd, rd, scratch);
  }
}
    127 
// Two's-complement negate: rd = 0 - rs.
void MacroAssemblerMIPSShared::ma_negu(Register rd, Register rs) {
  as_subu(rd, zero, rs);
}

// Bitwise NOT: nor with $zero yields ~(rs | 0) == ~rs.
void MacroAssemblerMIPSShared::ma_not(Register rd, Register rs) {
  as_nor(rd, rs, zero);
}
    135 
// Bit extract/insert

// Extract `size` bits of rs starting at bit `pos` into the low bits of rt
// (zero-extended). Uses the R2 `ext` instruction when available.
void MacroAssemblerMIPSShared::ma_ext(Register rt, Register rs, uint16_t pos,
                                      uint16_t size) {
  MOZ_ASSERT(pos < 32);
  MOZ_ASSERT(pos + size < 33);

  if (hasR2()) {
    as_ext(rt, rs, pos, size);
  } else {
    // Fallback: shift left to discard the bits above the field, then shift
    // right to drop the bits below it and zero-extend.
    int shift_left = 32 - (pos + size);
    as_sll(rt, rs, shift_left);
    int shift_right = 32 - size;
    // When size == 32 (full word) the right shift would be 0; skip it.
    if (shift_right > 0) {
      as_srl(rt, rt, shift_right);
    }
  }
}
    153 
// Insert the low `size` bits of rs into rt at bit position `pos`, leaving
// the other bits of rt unchanged. Uses the R2 `ins` instruction when
// available; otherwise synthesizes it with shifts and masks.
void MacroAssemblerMIPSShared::ma_ins(Register rt, Register rs, uint16_t pos,
                                      uint16_t size) {
  MOZ_ASSERT(pos < 32);
  MOZ_ASSERT(pos + size <= 32);
  MOZ_ASSERT(size != 0);

  if (hasR2()) {
    as_ins(rt, rs, pos, size);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    if (pos == 0) {
      // Field is at the bottom: clear rt's low `size` bits by shifting them
      // out and back, then OR in the extracted field.
      ma_ext(scratch, rs, 0, size);
      as_srl(rt, rt, size);
      as_sll(rt, rt, size);
      as_or(rt, rt, scratch);
    } else if (pos + size == 32) {
      // Field reaches the top: clear rt's high `size` bits symmetrically,
      // then OR in rs shifted into place.
      as_sll(scratch, rs, pos);
      as_sll(rt, rt, size);
      as_srl(rt, rt, size);
      as_or(rt, rt, scratch);
    } else {
      // General case:
      //   scratch  = (1 << size) - 1          (field mask, built as ~0 >> (32-size))
      //   scratch2 = (rs & mask) << pos       (field value in position)
      //   scratch  = ~(mask << pos)           (hole mask)
      //   rt       = scratch2 | (rt & hole)
      Register scratch2 = temps.Acquire();
      ma_subu(scratch, zero, Imm32(1));
      as_srl(scratch, scratch, 32 - size);
      as_and(scratch2, rs, scratch);
      as_sll(scratch2, scratch2, pos);
      as_sll(scratch, scratch, pos);
      as_nor(scratch, scratch, zero);
      as_and(scratch, rt, scratch);
      as_or(rt, scratch2, scratch);
    }
  }
}
    188 
// Sign extend

// Sign-extend the low byte of rt into rd. Without R2's `seb`, shift the
// byte to the top and arithmetic-shift it back down.
void MacroAssemblerMIPSShared::ma_seb(Register rd, Register rt) {
  if (hasR2()) {
    as_seb(rd, rt);
  } else {
    as_sll(rd, rt, 24);
    as_sra(rd, rd, 24);
  }
}

// Sign-extend the low halfword of rt into rd (same technique as ma_seb).
void MacroAssemblerMIPSShared::ma_seh(Register rd, Register rt) {
  if (hasR2()) {
    as_seh(rd, rt);
  } else {
    as_sll(rd, rt, 16);
    as_sra(rd, rd, 16);
  }
}
    207 
// And.

// rd &= rs.
void MacroAssemblerMIPSShared::ma_and(Register rd, Register rs) {
  as_and(rd, rd, rs);
}

// rd &= imm.
void MacroAssemblerMIPSShared::ma_and(Register rd, Imm32 imm) {
  ma_and(rd, rd, imm);
}

// rd = rs & imm. `andi` zero-extends its 16-bit immediate, so only
// unsigned-range values can use the immediate form; otherwise materialize
// the constant in a scratch register.
void MacroAssemblerMIPSShared::ma_and(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInUnsignedRange(imm.value)) {
    as_andi(rd, rs, imm.value);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    as_and(rd, rs, scratch);
  }
}
    227 
// Or.

// rd |= rs.
void MacroAssemblerMIPSShared::ma_or(Register rd, Register rs) {
  as_or(rd, rd, rs);
}

// rd |= imm.
void MacroAssemblerMIPSShared::ma_or(Register rd, Imm32 imm) {
  ma_or(rd, rd, imm);
}

// rd = rs | imm. `ori` zero-extends its immediate, hence the
// unsigned-range check; larger constants go through a scratch register.
void MacroAssemblerMIPSShared::ma_or(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInUnsignedRange(imm.value)) {
    as_ori(rd, rs, imm.value);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    as_or(rd, rs, scratch);
  }
}
    247 
    248 // xor
    249 void MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs) {
    250  as_xor(rd, rd, rs);
    251 }
    252 
    253 void MacroAssemblerMIPSShared::ma_xor(Register rd, Imm32 imm) {
    254  ma_xor(rd, rd, imm);
    255 }
    256 
    257 void MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs, Imm32 imm) {
    258  if (Imm16::IsInUnsignedRange(imm.value)) {
    259    as_xori(rd, rs, imm.value);
    260  } else {
    261    UseScratchRegisterScope temps(*this);
    262    Register scratch = temps.Acquire();
    263    ma_li(scratch, imm);
    264    as_xor(rd, rs, scratch);
    265  }
    266 }
    267 
    268 // word swap bytes within halfwords
    269 void MacroAssemblerMIPSShared::ma_wsbh(Register rd, Register rt) {
    270  as_wsbh(rd, rt);
    271 }
    272 
    273 void MacroAssemblerMIPSShared::ma_ctz(Register rd, Register rs) {
    274  UseScratchRegisterScope temps(*this);
    275  Register scratch = temps.Acquire();
    276  as_addiu(scratch, rs, -1);
    277  as_xor(rd, scratch, rs);
    278  as_and(rd, rd, scratch);
    279  as_clz(rd, rd);
    280  ma_li(scratch, Imm32(0x20));
    281  as_subu(rd, scratch, rd);
    282 }
    283 
// Arithmetic-based ops.

// Add.
// rd = rs + imm. `addiu` takes a signed 16-bit immediate; larger constants
// are materialized in a scratch register first.
void MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInSignedRange(imm.value)) {
    as_addiu(rd, rs, imm.value);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    as_addu(rd, rs, scratch);
  }
}

// rd += rs.
void MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs) {
  as_addu(rd, rd, rs);
}

// rd += imm.
void MacroAssemblerMIPSShared::ma_addu(Register rd, Imm32 imm) {
  ma_addu(rd, rd, imm);
}
    305 
// rd = rs + rt, branching to `overflow` on unsigned carry (CarrySet) or on
// no carry (CarryClear). After the add, a carry occurred iff the unsigned
// result is smaller than either operand; compare against whichever source
// was not clobbered by writing rd (hence the rd==rs assertion on rt).
void MacroAssemblerMIPSShared::ma_add32TestCarry(Condition cond, Register rd,
                                                 Register rs, Register rt,
                                                 Label* overflow) {
  MOZ_ASSERT(cond == Assembler::CarrySet || cond == Assembler::CarryClear);
  MOZ_ASSERT_IF(rd == rs, rt != rd);
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  as_addu(rd, rs, rt);
  as_sltu(scratch2, rd, rd == rs ? rt : rs);
  // Branch on scratch2 being non-zero (carry) or zero (no carry).
  ma_b(scratch2, scratch2, overflow,
       cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
}

// Immediate-operand variant: materialize imm and delegate to the register
// form above.
void MacroAssemblerMIPSShared::ma_add32TestCarry(Condition cond, Register rd,
                                                 Register rs, Imm32 imm,
                                                 Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  ma_li(scratch, imm);
  ma_add32TestCarry(cond, rd, rs, scratch, overflow);
}
    327 
    328 // Subtract.
    329 void MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs, Imm32 imm) {
    330  if (Imm16::IsInSignedRange(-imm.value)) {
    331    as_addiu(rd, rs, -imm.value);
    332  } else {
    333    UseScratchRegisterScope temps(*this);
    334    Register scratch = temps.Acquire();
    335    ma_li(scratch, imm);
    336    as_subu(rd, rs, scratch);
    337  }
    338 }
    339 
    340 void MacroAssemblerMIPSShared::ma_subu(Register rd, Imm32 imm) {
    341  ma_subu(rd, rd, imm);
    342 }
    343 
    344 void MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs) {
    345  as_subu(rd, rd, rs);
    346 }
    347 
// rd = rs - imm with a branch to `overflow` on signed 32-bit overflow.
// Normally rewritten as an overflow-checked add of -imm; INT32_MIN is
// special-cased because its negation is not representable in 32 bits.
void MacroAssemblerMIPSShared::ma_sub32TestOverflow(Register rd, Register rs,
                                                    Imm32 imm,
                                                    Label* overflow) {
  if (imm.value != INT32_MIN) {
    asMasm().ma_add32TestOverflow(rd, rs, Imm32(-imm.value), overflow);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_li(scratch, Imm32(imm.value));
    asMasm().ma_sub32TestOverflow(rd, rs, scratch, overflow);
  }
}
    360 
// rd = rs * imm (low 32 bits). The constant is materialized in a scratch
// register since `mul` has no immediate form.
void MacroAssemblerMIPSShared::ma_mul(Register rd, Register rs, Imm32 imm) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  ma_li(scratch, imm);
  as_mul(rd, rs, scratch);
}
    367 
// rd = rs * rt (low 32 bits), branching to `overflow` if the signed 32-bit
// multiply overflowed. The full product is computed with a doubleword
// multiply (dmul/dmult); `sll rd, 0` then sign-extends the low 32 bits of
// rd, and a mismatch with the full product means the result did not fit in
// 32 bits. (This relies on 64-bit registers — mips64 semantics of SLL.)
void MacroAssemblerMIPSShared::ma_mul32TestOverflow(Register rd, Register rs,
                                                    Register rt,
                                                    Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();

#ifdef MIPSR6
  as_dmul(rd, rs, rt);
#else
  as_dmult(rs, rt);
  as_mflo(rd);
#endif
  // scratch = sign-extended low 32 bits of rd; differs from rd on overflow.
  ma_sll(scratch, rd, Imm32(0));
  ma_b(rd, scratch, overflow, Assembler::NotEqual);
}

// Immediate-operand variant of the overflow-checked multiply above.
void MacroAssemblerMIPSShared::ma_mul32TestOverflow(Register rd, Register rs,
                                                    Imm32 imm,
                                                    Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();

  ma_li(scratch, imm);
#ifdef MIPSR6
  as_dmul(rd, rs, scratch);
#else
  as_dmult(rs, scratch);
  as_mflo(rd);
#endif
  ma_sll(scratch, rd, Imm32(0));
  ma_b(rd, scratch, overflow, Assembler::NotEqual);
}
    400 
// Compute dest = src % ((1 << shift) - 1), preserving the sign of src (JS
// modulus semantics). If `negZero` is non-null, branch there when the result
// would be negative zero (src < 0 and remainder 0).
void MacroAssemblerMIPSShared::ma_mod_mask(Register src, Register dest,
                                           Register hold, Register remain,
                                           int32_t shift, Label* negZero) {
  UseScratchRegisterScope temps(*this);

  // MATH:
  // We wish to compute x % ((1<<y) - 1) for a known constant, y.
  // First, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit
  // dividend as a number in base b, namely
  // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
  // now, since both addition and multiplication commute with modulus,
  // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
  // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
  // now, since b == C + 1, b % C == 1, and b^n % C == 1
  // this means that the whole thing simplifies to:
  // (c_0 + c_1 + c_2 ... + c_n) % C
  // each c_n can easily be computed by a shift/bitextract, and the modulus
  // can be maintained by simply subtracting by C whenever the number gets
  // over C.
  int32_t mask = (1 << shift) - 1;
  Label head, negative, sumSigned, done;

  // hold holds -1 if the value was negative, 1 otherwise.
  // remain holds the remaining bits that have not been processed
  // scratch2 serves as a temporary location to store extracted bits
  // into as well as holding the trial subtraction as a temp value dest is
  // the accumulator (and holds the final result)

  // move the whole value into the remain.
  ma_move(remain, src);
  // Zero out the dest.
  ma_li(dest, Imm32(0));
  // Set the hold appropriately.
  ma_b(remain, remain, &negative, Signed, ShortJump);
  ma_li(hold, Imm32(1));
  ma_b(&head, ShortJump);

  bind(&negative);
  ma_li(hold, Imm32(-1));
  // Work on the absolute value so the loop only handles non-negatives.
  ma_negu(remain, remain);

  // Begin the main loop.
  bind(&head);

  Register scratch2 = temps.Acquire();
  // Extract the bottom bits into scratch2.
  ma_and(scratch2, remain, Imm32(mask));
  // Add those bits to the accumulator.
  as_addu(dest, dest, scratch2);
  // Do a trial subtraction
  ma_subu(scratch2, dest, Imm32(mask));
  // If (sum - C) > 0, store sum - C back into sum, thus performing a
  // modulus.
  ma_b(scratch2, scratch2, &sumSigned, Signed, ShortJump);
  ma_move(dest, scratch2);
  bind(&sumSigned);
  // Get rid of the bits that we extracted before.
  as_srl(remain, remain, shift);
  // If the shift produced zero, finish, otherwise, continue in the loop.
  ma_b(remain, remain, &head, NonZero, ShortJump);
  // Check the hold to see if we need to negate the result.
  ma_b(hold, hold, &done, NotSigned, ShortJump);

  if (negZero != nullptr) {
    // Jump out in case of negative zero.
    ma_b(dest, dest, negZero, Zero);
  }
  // If the hold was non-zero, negate the result to be in line with
  // what JS wants
  ma_negu(dest, dest);

  bind(&done);
}
    474 
// Memory.

// Load from a base+index*scale+offset address into `dest`, returning the
// offset of the (potentially faulting) memory instruction for trap
// bookkeeping. On Loongson, sign-extending loads with a small offset use
// the single indexed gs* load instructions; otherwise the effective address
// is computed into `dest` (which is about to be overwritten anyway) and the
// Address form of ma_load finishes the job.
FaultingCodeOffset MacroAssemblerMIPSShared::ma_load(
    Register dest, const BaseIndex& src, LoadStoreSize size,
    LoadStoreExtension extension) {
  UseScratchRegisterScope temps(*this);
  FaultingCodeOffset fco;
  // ZeroExtend is excluded from the fast path — presumably because the gs*
  // indexed loads sign-extend; TODO confirm against the Loongson ISA docs.
  if (isLoongson() && ZeroExtend != extension &&
      Imm8::IsInSignedRange(src.offset)) {
    // NOTE(review): this inner scope shadows the outer `temps` declared
    // above; harmless but redundant.
    UseScratchRegisterScope temps(*this);
    Register index = src.index;

    if (src.scale != TimesOne) {
      int32_t shift = Imm32::ShiftOf(src.scale).value;

      // Pre-scale the index into a scratch register.
      Register scratch2 = temps.Acquire();
      MOZ_ASSERT(scratch2 != src.base);
      index = scratch2;
      asMasm().ma_dsll(index, src.index, Imm32(shift));
    }

    // Record the offset of the instruction that may fault.
    fco = FaultingCodeOffset(currentOffset());
    switch (size) {
      case SizeByte:
        as_gslbx(dest, src.base, index, src.offset);
        break;
      case SizeHalfWord:
        as_gslhx(dest, src.base, index, src.offset);
        break;
      case SizeWord:
        as_gslwx(dest, src.base, index, src.offset);
        break;
      case SizeDouble:
        as_gsldx(dest, src.base, index, src.offset);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_load");
    }
    return fco;
  }

  // dest will be overwritten anyway
  asMasm().computeEffectiveAddress(src, dest);
  return asMasm().ma_load(dest, Address(dest, 0), size, extension);
}
    520 
// Unaligned load from base+index*scale+offset. Words/doublewords are loaded
// with the lwl/lwr (ldl/ldr) pair: on little-endian the "right" load at
// lowOffset fills the low bytes and the "left" load at hiOffset fills the
// high bytes. Halfwords are assembled from two byte loads.
void MacroAssemblerMIPSShared::ma_load_unaligned(Register dest,
                                                 const BaseIndex& src,
                                                 LoadStoreSize size,
                                                 LoadStoreExtension extension) {
  int16_t lowOffset, hiOffset;
  UseScratchRegisterScope temps(*this);
  Register base = temps.Acquire();
  asMasm().computeScaledAddress(src, base);
  Register scratch = temps.Acquire();

  // Both ends of the access must be addressable with a 16-bit displacement;
  // otherwise fold the offset into `base` and address from 0.
  if (Imm16::IsInSignedRange(src.offset) &&
      Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
    lowOffset = Imm16(src.offset).encode();
    hiOffset = Imm16(src.offset + size / 8 - 1).encode();
  } else {
    ma_li(scratch, Imm32(src.offset));
    asMasm().addPtr(scratch, base);
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  switch (size) {
    case SizeHalfWord:
      MOZ_ASSERT(dest != scratch);
      // High byte: sign- or zero-extended per `extension`.
      if (extension == ZeroExtend) {
        as_lbu(scratch, base, hiOffset);
      } else {
        as_lb(scratch, base, hiOffset);
      }
      // Low byte, then merge the high byte at bit 8.
      as_lbu(dest, base, lowOffset);
      if (hasR2()) {
        as_ins(dest, scratch, 8, 24);
      } else {
        as_sll(scratch, scratch, 8);
        as_or(dest, dest, scratch);
      }
      break;
    case SizeWord:
      MOZ_ASSERT(dest != base);
      as_lwl(dest, base, hiOffset);
      as_lwr(dest, base, lowOffset);
      if (extension == ZeroExtend) {
        // Clear the upper 32 bits that lwl/lwr sign-extended.
        asMasm().ma_dext(dest, dest, Imm32(0), Imm32(32));
      }
      break;
    case SizeDouble:
      MOZ_ASSERT(dest != base);
      as_ldl(dest, base, hiOffset);
      as_ldr(dest, base, lowOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_load_unaligned");
  }
}
    575 
// Unaligned load from a base+offset address. Same strategy as the BaseIndex
// overload: lwl/lwr (ldl/ldr) pairs for words/doublewords, two byte loads
// for halfwords.
void MacroAssemblerMIPSShared::ma_load_unaligned(Register dest,
                                                 const Address& address,
                                                 LoadStoreSize size,
                                                 LoadStoreExtension extension) {
  int16_t lowOffset, hiOffset;
  UseScratchRegisterScope temps(*this);
  Register scratch1 = temps.Acquire();
  Register scratch2 = temps.Acquire();
  Register base;

  // Use the original base when both ends of the access fit a 16-bit
  // displacement; otherwise compute base+offset into scratch1.
  if (Imm16::IsInSignedRange(address.offset) &&
      Imm16::IsInSignedRange(address.offset + size / 8 - 1)) {
    base = address.base;
    lowOffset = Imm16(address.offset).encode();
    hiOffset = Imm16(address.offset + size / 8 - 1).encode();
  } else {
    ma_li(scratch1, Imm32(address.offset));
    asMasm().addPtr(address.base, scratch1);
    base = scratch1;
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  switch (size) {
    case SizeHalfWord:
      MOZ_ASSERT(base != scratch2 && dest != scratch2);
      // High byte, extended per `extension`; low byte; merge at bit 8.
      if (extension == ZeroExtend) {
        as_lbu(scratch2, base, hiOffset);
      } else {
        as_lb(scratch2, base, hiOffset);
      }
      as_lbu(dest, base, lowOffset);
      if (hasR2()) {
        as_ins(dest, scratch2, 8, 24);
      } else {
        as_sll(scratch2, scratch2, 8);
        as_or(dest, dest, scratch2);
      }
      break;
    case SizeWord:
      MOZ_ASSERT(dest != base);
      as_lwl(dest, base, hiOffset);
      as_lwr(dest, base, lowOffset);
      if (extension == ZeroExtend) {
        // Clear the sign-extended upper 32 bits.
        as_dext(dest, dest, 0, 32);
      }
      break;
    case SizeDouble:
      MOZ_ASSERT(dest != base);
      as_ldl(dest, base, hiOffset);
      as_ldr(dest, base, lowOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_load_unaligned");
  }
}
    632 
// Wasm unaligned load: like the plain overloads, but records the first
// memory instruction of the sequence in the wasm trap metadata so an
// out-of-bounds access can be identified and handled.
void MacroAssemblerMIPSShared::ma_load_unaligned(
    const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src,
    Register temp, LoadStoreSize size, LoadStoreExtension extension) {
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
  int16_t lowOffset, hiOffset;
  Register base;

  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  asMasm().computeScaledAddress(src, scratch2);

  // Fold the offset into the base when it does not fit a 16-bit
  // displacement for both ends of the access.
  if (Imm16::IsInSignedRange(src.offset) &&
      Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
    base = scratch2;
    lowOffset = Imm16(src.offset).encode();
    hiOffset = Imm16(src.offset + size / 8 - 1).encode();
  } else {
    Register scratch = temps.Acquire();
    ma_li(scratch, Imm32(src.offset));
    asMasm().addPtr(scratch2, scratch);
    base = scratch;
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  BufferOffset load;
  unsigned byteSize = access.byteSize();
  switch (size) {
    case SizeHalfWord:
      // begins with 1-byte load
      // (the recorded trap instruction is the first byte load, so the
      // metadata byte size is 1, not the full access size)
      byteSize = 1;
      if (extension == ZeroExtend) {
        load = as_lbu(temp, base, hiOffset);
      } else {
        load = as_lb(temp, base, hiOffset);
      }
      as_lbu(dest, base, lowOffset);
      if (hasR2()) {
        as_ins(dest, temp, 8, 24);
      } else {
        as_sll(temp, temp, 8);
        as_or(dest, dest, temp);
      }
      break;
    case SizeWord:
      load = as_lwl(dest, base, hiOffset);
      as_lwr(dest, base, lowOffset);
      if (extension == ZeroExtend) {
        asMasm().ma_dext(dest, dest, Imm32(0), Imm32(32));
      }
      break;
    case SizeDouble:
      load = as_ldl(dest, base, hiOffset);
      as_ldr(dest, base, lowOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_load");
  }

  // Register the faulting instruction with the wasm trap machinery.
  append(access, wasm::TrapMachineInsnForLoad(byteSize),
         FaultingCodeOffset(load.getOffset()));
}
    695 
// Store `data` to base+index*scale+offset, returning the offset of the
// memory instruction for trap bookkeeping. Loongson gets the single indexed
// gs* store fast path; otherwise the scaled address is computed into a
// scratch register and the Address form of ma_store finishes.
FaultingCodeOffset MacroAssemblerMIPSShared::ma_store(
    Register data, const BaseIndex& dest, LoadStoreSize size,
    LoadStoreExtension extension) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
    FaultingCodeOffset fco;
    Register index = dest.index;

    if (dest.scale != TimesOne) {
      int32_t shift = Imm32::ShiftOf(dest.scale).value;

      // Pre-scale the index into the scratch register.
      MOZ_ASSERT(scratch2 != dest.base);
      index = scratch2;
      asMasm().ma_dsll(index, dest.index, Imm32(shift));
    }

    // Record the offset of the instruction that may fault.
    fco = FaultingCodeOffset(currentOffset());
    switch (size) {
      case SizeByte:
        as_gssbx(data, dest.base, index, dest.offset);
        break;
      case SizeHalfWord:
        as_gsshx(data, dest.base, index, dest.offset);
        break;
      case SizeWord:
        as_gsswx(data, dest.base, index, dest.offset);
        break;
      case SizeDouble:
        as_gssdx(data, dest.base, index, dest.offset);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_store");
    }
    return fco;
  }

  asMasm().computeScaledAddress(dest, scratch2);
  return asMasm().ma_store(data, Address(scratch2, dest.offset), size,
                           extension);
}
    737 
// Store an immediate to base+index*scale+offset. A zero immediate is stored
// straight from $zero; any other value is materialized in a scratch
// register first.
void MacroAssemblerMIPSShared::ma_store(Imm32 imm, const BaseIndex& dest,
                                        LoadStoreSize size,
                                        LoadStoreExtension extension) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
    Register data = zero;
    Register index = dest.index;

    if (imm.value) {
      // Non-zero immediate: load it into a scratch register.
      Register scratch = temps.Acquire();
      MOZ_ASSERT(scratch != dest.base);
      MOZ_ASSERT(scratch != dest.index);
      data = scratch;
      ma_li(data, imm);
    }

    if (dest.scale != TimesOne) {
      int32_t shift = Imm32::ShiftOf(dest.scale).value;

      MOZ_ASSERT(scratch2 != dest.base);
      index = scratch2;
      asMasm().ma_dsll(index, dest.index, Imm32(shift));
    }

    switch (size) {
      case SizeByte:
        as_gssbx(data, dest.base, index, dest.offset);
        break;
      case SizeHalfWord:
        as_gsshx(data, dest.base, index, dest.offset);
        break;
      case SizeWord:
        as_gsswx(data, dest.base, index, dest.offset);
        break;
      case SizeDouble:
        as_gssdx(data, dest.base, index, dest.offset);
        break;
      default:
        MOZ_CRASH("Invalid argument for ma_store");
    }
    return;
  }

  // Make sure that scratch2 contains absolute address so that
  // offset is 0.
  asMasm().computeEffectiveAddress(dest, scratch2);

  Register scratch = temps.Acquire();
  // Scratch register is free now, use it for loading imm value
  ma_li(scratch, imm);

  // with offset=0 scratch will not be used in ma_store()
  // so we can use it as a parameter here
  asMasm().ma_store(scratch, Address(scratch2, 0), size, extension);
}
    794 
// Unaligned store to a base+offset address. Words/doublewords use the
// swl/swr (sdl/sdr) pair; halfwords are stored as two byte stores, the high
// byte extracted with ma_ext.
void MacroAssemblerMIPSShared::ma_store_unaligned(Register data,
                                                  const Address& address,
                                                  LoadStoreSize size) {
  int16_t lowOffset, hiOffset;
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  Register base;

  // Use the original base when both ends of the access fit a 16-bit
  // displacement; otherwise compute base+offset into the scratch register.
  if (Imm16::IsInSignedRange(address.offset) &&
      Imm16::IsInSignedRange(address.offset + size / 8 - 1)) {
    base = address.base;
    lowOffset = Imm16(address.offset).encode();
    hiOffset = Imm16(address.offset + size / 8 - 1).encode();
  } else {
    ma_li(scratch, Imm32(address.offset));
    asMasm().addPtr(address.base, scratch);
    base = scratch;
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  switch (size) {
    case SizeHalfWord: {
      UseScratchRegisterScope temps2(*this);
      Register scratch2 = temps2.Acquire();
      MOZ_ASSERT(base != scratch2);
      // Low byte directly, then bits 8..15 of data as the high byte.
      as_sb(data, base, lowOffset);
      ma_ext(scratch2, data, 8, 8);
      as_sb(scratch2, base, hiOffset);
      break;
    }
    case SizeWord:
      as_swl(data, base, hiOffset);
      as_swr(data, base, lowOffset);
      break;
    case SizeDouble:
      as_sdl(data, base, hiOffset);
      as_sdr(data, base, lowOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_store_unaligned");
  }
}
    838 
// Unaligned store to base+index*scale+offset. Same strategy as the Address
// overload: swl/swr (sdl/sdr) pairs for words/doublewords, two byte stores
// for halfwords.
void MacroAssemblerMIPSShared::ma_store_unaligned(Register data,
                                                  const BaseIndex& dest,
                                                  LoadStoreSize size) {
  int16_t lowOffset, hiOffset;
  UseScratchRegisterScope temps(*this);
  Register base = temps.Acquire();
  asMasm().computeScaledAddress(dest, base);
  Register scratch = temps.Acquire();

  // Fold the offset into `base` when it does not fit a 16-bit displacement
  // for both ends of the access.
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
    lowOffset = Imm16(dest.offset).encode();
    hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
  } else {
    ma_li(scratch, Imm32(dest.offset));
    asMasm().addPtr(scratch, base);
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  switch (size) {
    case SizeHalfWord:
      MOZ_ASSERT(base != scratch);
      // Low byte directly, then bits 8..15 of data as the high byte.
      as_sb(data, base, lowOffset);
      ma_ext(scratch, data, 8, 8);
      as_sb(scratch, base, hiOffset);
      break;
    case SizeWord:
      as_swl(data, base, hiOffset);
      as_swr(data, base, lowOffset);
      break;
    case SizeDouble:
      as_sdl(data, base, hiOffset);
      as_sdr(data, base, lowOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_store_unaligned");
  }
}
    878 
// Wasm unaligned store: like the plain BaseIndex variant, but additionally
// records trap metadata for the first store instruction of the sequence so a
// faulting access can be attributed to this wasm memory access.
void MacroAssemblerMIPSShared::ma_store_unaligned(
    const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest,
    Register temp, LoadStoreSize size, LoadStoreExtension extension) {
  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
  int16_t lowOffset, hiOffset;
  Register base;

  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  asMasm().computeScaledAddress(dest, scratch2);

  // Pick a base register such that both ends of the access fit in 16-bit
  // signed store offsets.
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
    base = scratch2;
    lowOffset = Imm16(dest.offset).encode();
    hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
  } else {
    Register scratch = temps.Acquire();
    ma_li(scratch, Imm32(dest.offset));
    asMasm().addPtr(scratch2, scratch);
    base = scratch;
    lowOffset = Imm16(0).encode();
    hiOffset = Imm16(size / 8 - 1).encode();
  }

  // `store` captures the buffer offset of the first emitted store; the trap
  // entry appended below must point at that instruction.
  BufferOffset store;
  unsigned byteSize = access.byteSize();
  switch (size) {
    case SizeHalfWord:
      // begins with 1-byte store, so the faulting machine insn is a byte
      // store regardless of the access's logical size
      byteSize = 1;
      ma_ext(temp, data, 8, 8);
      store = as_sb(temp, base, hiOffset);
      as_sb(data, base, lowOffset);
      break;
    case SizeWord:
      store = as_swl(data, base, hiOffset);
      as_swr(data, base, lowOffset);
      break;
    case SizeDouble:
      store = as_sdl(data, base, hiOffset);
      as_sdr(data, base, lowOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_store");
  }
  append(access, wasm::TrapMachineInsnForStore(byteSize),
         FaultingCodeOffset(store.getOffset()));
}
    928 
    929 // Branch helpers used when branching from within mips-specific code.
// Compare two registers and branch to `label` if condition `c` holds.
// Equality and single-register tests map directly onto branch encodings;
// relational conditions go through ma_cmp, which leaves a 0/1 value in a
// scratch register to branch on.
void MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label,
                                    Condition c, JumpKind jumpKind) {
  switch (c) {
    case Equal:
    case NotEqual:
      asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
      break;
    case Always:
      ma_b(label, jumpKind);
      break;
    case Zero:
    case NonZero:
    case Signed:
    case NotSigned:
      // These conditions test a single register; callers pass it in both
      // operand slots.
      MOZ_ASSERT(lhs == rhs);
      asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
      break;
    default: {
      // Relational condition: materialize it into scratch, then branch on
      // scratch being equal / not equal to zero.
      UseScratchRegisterScope temps(*this);
      Register scratch = temps.Acquire();
      Condition cond = ma_cmp(scratch, lhs, rhs, c);
      asMasm().branchWithCode(getBranchCode(scratch, cond), label, jumpKind,
                              scratch);
    } break;
  }
}
    956 
// Compare a register against a 32-bit immediate and branch on condition `c`.
void MacroAssemblerMIPSShared::ma_b(Register lhs, Imm32 imm, Label* label,
                                    Condition c, JumpKind jumpKind) {
  MOZ_ASSERT(c != Overflow);
  if (imm.value == 0) {
    // Comparisons against zero either resolve statically (for the unsigned
    // relations) or use the single-register branch encodings.
    if (c == Always || c == AboveOrEqual) {
      // Unsigned x >= 0 always holds.
      ma_b(label, jumpKind);
    } else if (c == Below) {
      ;  // Unsigned x < 0 never holds: no branch required.
    } else {
      asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
    }
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    switch (c) {
      case Equal:
      case NotEqual:
        // Materialize the immediate and use the register-register overload.
        ma_li(scratch, imm);
        ma_b(lhs, scratch, label, c, jumpKind);
        break;
      default:
        // Relational: compute 0/1 in scratch, then branch on it against zero.
        Condition cond = ma_cmp(scratch, lhs, imm, c);
        asMasm().branchWithCode(getBranchCode(scratch, cond), label, jumpKind,
                                scratch);
    }
  }
}
    984 
    985 void MacroAssemblerMIPSShared::ma_b(Register lhs, ImmPtr imm, Label* l,
    986                                    Condition c, JumpKind jumpKind) {
    987  asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
    988 }
    989 
// Unconditional branch to `label`.
void MacroAssemblerMIPSShared::ma_b(Label* label, JumpKind jumpKind) {
  asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
}
    993 
    994 Assembler::Condition MacroAssemblerMIPSShared::ma_cmp(Register dest,
    995                                                      Register lhs,
    996                                                      Register rhs,
    997                                                      Condition c) {
    998  switch (c) {
    999    case Above:
   1000      // bgtu s,t,label =>
   1001      //   sltu at,t,s
   1002      //   bne at,$zero,offs
   1003      as_sltu(dest, rhs, lhs);
   1004      return NotEqual;
   1005    case AboveOrEqual:
   1006      // bgeu s,t,label =>
   1007      //   sltu at,s,t
   1008      //   beq at,$zero,offs
   1009      as_sltu(dest, lhs, rhs);
   1010      return Equal;
   1011    case Below:
   1012      // bltu s,t,label =>
   1013      //   sltu at,s,t
   1014      //   bne at,$zero,offs
   1015      as_sltu(dest, lhs, rhs);
   1016      return NotEqual;
   1017    case BelowOrEqual:
   1018      // bleu s,t,label =>
   1019      //   sltu at,t,s
   1020      //   beq at,$zero,offs
   1021      as_sltu(dest, rhs, lhs);
   1022      return Equal;
   1023    case GreaterThan:
   1024      // bgt s,t,label =>
   1025      //   slt at,t,s
   1026      //   bne at,$zero,offs
   1027      as_slt(dest, rhs, lhs);
   1028      return NotEqual;
   1029    case GreaterThanOrEqual:
   1030      // bge s,t,label =>
   1031      //   slt at,s,t
   1032      //   beq at,$zero,offs
   1033      as_slt(dest, lhs, rhs);
   1034      return Equal;
   1035    case LessThan:
   1036      // blt s,t,label =>
   1037      //   slt at,s,t
   1038      //   bne at,$zero,offs
   1039      as_slt(dest, lhs, rhs);
   1040      return NotEqual;
   1041    case LessThanOrEqual:
   1042      // ble s,t,label =>
   1043      //   slt at,t,s
   1044      //   beq at,$zero,offs
   1045      as_slt(dest, rhs, lhs);
   1046      return Equal;
   1047    default:
   1048      MOZ_CRASH("Invalid condition.");
   1049  }
   1050  return Always;
   1051 }
   1052 
// Immediate-operand variant of ma_cmp: sets `dest` to 0/1 and returns the
// Equal/NotEqual condition to branch on against zero. Where possible the
// immediate is folded into slti/sltiu; "x <= imm" is rewritten as
// "x < imm + 1" when imm + 1 still fits in the 16-bit immediate field.
Assembler::Condition MacroAssemblerMIPSShared::ma_cmp(Register dest,
                                                      Register lhs, Imm32 imm,
                                                      Condition c) {
  UseScratchRegisterScope temps(*this);
  switch (c) {
    case Above:
    case BelowOrEqual:
      // The extra imm.value != -1 check guards the unsigned wrap of
      // 0xFFFFFFFF + 1.
      if (Imm16::IsInSignedRange(imm.value + 1) && imm.value != -1) {
        // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
        as_sltiu(dest, lhs, imm.value + 1);

        return (c == BelowOrEqual ? NotEqual : Equal);
      } else {
        // Fall back to a register comparison; reuse dest as the temporary
        // unless it aliases lhs.
        Register scratch = dest == lhs ? temps.Acquire() : dest;
        ma_li(scratch, imm);
        as_sltu(dest, scratch, lhs);
        return (c == BelowOrEqual ? Equal : NotEqual);
      }
    case AboveOrEqual:
    case Below:
      if (Imm16::IsInSignedRange(imm.value)) {
        as_sltiu(dest, lhs, imm.value);
      } else {
        Register scratch = dest == lhs ? temps.Acquire() : dest;
        ma_li(scratch, imm);
        as_sltu(dest, lhs, scratch);
      }
      return (c == AboveOrEqual ? Equal : NotEqual);
    case GreaterThan:
    case LessThanOrEqual:
      if (Imm16::IsInSignedRange(imm.value + 1)) {
        // lhs <= rhs via lhs < rhs + 1.
        as_slti(dest, lhs, imm.value + 1);
        return (c == LessThanOrEqual ? NotEqual : Equal);
      } else {
        Register scratch = dest == lhs ? temps.Acquire() : dest;
        ma_li(scratch, imm);
        as_slt(dest, scratch, lhs);
        return (c == LessThanOrEqual ? Equal : NotEqual);
      }
    case GreaterThanOrEqual:
    case LessThan:
      if (Imm16::IsInSignedRange(imm.value)) {
        as_slti(dest, lhs, imm.value);
      } else {
        Register scratch = dest == lhs ? temps.Acquire() : dest;
        ma_li(scratch, imm);
        as_slt(dest, lhs, scratch);
      }
      return (c == GreaterThanOrEqual ? Equal : NotEqual);
    default:
      MOZ_CRASH("Invalid condition.");
  }
  return Always;
}
   1108 
// Set rd to 1 if (rs c rt) holds, else to 0, using MIPS set-on-less-than
// sequences. The pseudo-op expansion used is noted above each case.
void MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Register rt,
                                          Condition c) {
  switch (c) {
    case Equal:
      // seq d,s,t =>
      //   xor d,s,t
      //   sltiu d,d,1
      as_xor(rd, rs, rt);
      as_sltiu(rd, rd, 1);
      break;
    case NotEqual:
      // sne d,s,t =>
      //   xor d,s,t
      //   sltu d,$zero,d
      as_xor(rd, rs, rt);
      as_sltu(rd, zero, rd);
      break;
    case Above:
      // sgtu d,s,t =>
      //   sltu d,t,s
      as_sltu(rd, rt, rs);
      break;
    case AboveOrEqual:
      // sgeu d,s,t =>
      //   sltu d,s,t
      //   xori d,d,1
      as_sltu(rd, rs, rt);
      as_xori(rd, rd, 1);
      break;
    case Below:
      // sltu d,s,t
      as_sltu(rd, rs, rt);
      break;
    case BelowOrEqual:
      // sleu d,s,t =>
      //   sltu d,t,s
      //   xori d,d,1
      as_sltu(rd, rt, rs);
      as_xori(rd, rd, 1);
      break;
    case GreaterThan:
      // sgt d,s,t =>
      //   slt d,t,s
      as_slt(rd, rt, rs);
      break;
    case GreaterThanOrEqual:
      // sge d,s,t =>
      //   slt d,s,t
      //   xori d,d,1
      as_slt(rd, rs, rt);
      as_xori(rd, rd, 1);
      break;
    case LessThan:
      // slt d,s,t
      as_slt(rd, rs, rt);
      break;
    case LessThanOrEqual:
      // sle d,s,t =>
      //   slt d,t,s
      //   xori d,d,1
      as_slt(rd, rt, rs);
      as_xori(rd, rd, 1);
      break;
    // The remaining conditions test a single register; callers pass it in
    // both operand slots.
    case Zero:
      MOZ_ASSERT(rs == rt);
      // seq d,s,$zero =>
      //   sltiu d,s,1
      as_sltiu(rd, rs, 1);
      break;
    case NonZero:
      MOZ_ASSERT(rs == rt);
      // sne d,s,$zero =>
      //   sltu d,$zero,s
      as_sltu(rd, zero, rs);
      break;
    case Signed:
      MOZ_ASSERT(rs == rt);
      as_slt(rd, rs, zero);
      break;
    case NotSigned:
      MOZ_ASSERT(rs == rt);
      // sge d,s,$zero =>
      //   slt d,s,$zero
      //   xori d,d,1
      as_slt(rd, rs, zero);
      as_xori(rd, rd, 1);
      break;
    default:
      MOZ_CRASH("Invalid condition.");
  }
}
   1200 
// Emit the floating-point compare for DoubleCondition `c` and report through
// *testKind whether the caller should act when the resulting condition is
// true (TestForTrue) or false (TestForFalse). Conditions with no direct
// compare encoding (e.g. "ordered", "not equal") are implemented by testing
// their negation and returning TestForFalse; ">"/">=" forms compare with the
// operands swapped.
void MacroAssemblerMIPSShared::compareFloatingPoint(
    FloatFormat fmt, FloatRegister lhs, FloatRegister rhs, DoubleCondition c,
    FloatTestKind* testKind, FPConditionBit fcc) {
  switch (c) {
    case DoubleOrdered:
      // ordered == !unordered
      as_cun(fmt, lhs, rhs, fcc);
      *testKind = TestForFalse;
      break;
    case DoubleEqual:
      as_ceq(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleNotEqual:
      // not-equal == !(unordered-or-equal)
      as_cueq(fmt, lhs, rhs, fcc);
      *testKind = TestForFalse;
      break;
    case DoubleGreaterThan:
      // lhs > rhs == rhs < lhs
      as_colt(fmt, rhs, lhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleGreaterThanOrEqual:
      as_cole(fmt, rhs, lhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleLessThan:
      as_colt(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleLessThanOrEqual:
      as_cole(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleUnordered:
      as_cun(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleEqualOrUnordered:
      as_cueq(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleNotEqualOrUnordered:
      // not-equal-or-unordered == !equal
      as_ceq(fmt, lhs, rhs, fcc);
      *testKind = TestForFalse;
      break;
    case DoubleGreaterThanOrUnordered:
      as_cult(fmt, rhs, lhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleGreaterThanOrEqualOrUnordered:
      as_cule(fmt, rhs, lhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleLessThanOrUnordered:
      as_cult(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    case DoubleLessThanOrEqualOrUnordered:
      as_cule(fmt, lhs, rhs, fcc);
      *testKind = TestForTrue;
      break;
    default:
      MOZ_CRASH("Invalid DoubleCondition.");
  }
}
   1265 
// Set `dest` to 0/1 according to the double-precision comparison (lhs c rhs).
void MacroAssemblerMIPSShared::ma_cmp_set_double(Register dest,
                                                 FloatRegister lhs,
                                                 FloatRegister rhs,
                                                 DoubleCondition c) {
  FloatTestKind moveCondition;
  compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);

#ifdef MIPSR6
  // R6: read the compare result (all-ones or zero) back into a GPR and
  // reduce it to a boolean.
  // NOTE(review): assumes compareFloatingPoint deposits the R6 compare result
  // in FloatRegisters::f24 — confirm against its R6 implementation.
  as_mfc1(dest, FloatRegisters::f24);
  if (moveCondition == TestForTrue) {
    as_andi(dest, dest, 0x1);   // -1 -> 1, 0 -> 0
  } else {
    as_addiu(dest, dest, 0x1);  // -1 -> 0, 0 -> 1 (inverted sense)
  }
#else
  // Pre-R6: start with 1 and conditionally clear it based on the FP
  // condition bit the compare just set.
  ma_li(dest, Imm32(1));

  if (moveCondition == TestForTrue) {
    as_movf(dest, zero);  // clear when the condition bit is false
  } else {
    as_movt(dest, zero);  // clear when the condition bit is true
  }
#endif
}
   1290 
// Set `dest` to 0/1 according to the single-precision comparison (lhs c rhs).
// Mirrors ma_cmp_set_double, but compares in SingleFloat format.
void MacroAssemblerMIPSShared::ma_cmp_set_float32(Register dest,
                                                  FloatRegister lhs,
                                                  FloatRegister rhs,
                                                  DoubleCondition c) {
  FloatTestKind moveCondition;
  compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);

#ifdef MIPSR6
  // R6: read the compare result (all-ones or zero) back into a GPR and
  // reduce it to a boolean.
  // NOTE(review): assumes compareFloatingPoint deposits the R6 compare result
  // in FloatRegisters::f24 — confirm against its R6 implementation.
  as_mfc1(dest, FloatRegisters::f24);
  if (moveCondition == TestForTrue) {
    as_andi(dest, dest, 0x1);   // -1 -> 1, 0 -> 0
  } else {
    as_addiu(dest, dest, 0x1);  // -1 -> 0, 0 -> 1 (inverted sense)
  }
#else
  // Pre-R6: start with 1 and conditionally clear it based on the FP
  // condition bit the compare just set.
  ma_li(dest, Imm32(1));

  if (moveCondition == TestForTrue) {
    as_movf(dest, zero);  // clear when the condition bit is false
  } else {
    as_movt(dest, zero);  // clear when the condition bit is true
  }
#endif
}
   1315 
// Set rd to 1 if (rs c imm) holds, else 0. Comparisons against zero use the
// $zero register directly; other immediates are handled via ma_xor or the
// immediate ma_cmp helper.
void MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Imm32 imm,
                                          Condition c) {
  if (imm.value == 0) {
    switch (c) {
      case Equal:
      case BelowOrEqual:
        // Unsigned x <= 0 is x == 0.
        as_sltiu(rd, rs, 1);
        break;
      case NotEqual:
      case Above:
        // Unsigned x > 0 is x != 0.
        as_sltu(rd, zero, rs);
        break;
      case AboveOrEqual:
      case Below:
        // Unsigned x >= 0 is always true; x < 0 is always false.
        as_ori(rd, zero, c == AboveOrEqual ? 1 : 0);
        break;
      case GreaterThan:
      case LessThanOrEqual:
        as_slt(rd, zero, rs);
        if (c == LessThanOrEqual) {
          as_xori(rd, rd, 1);
        }
        break;
      case LessThan:
      case GreaterThanOrEqual:
        as_slt(rd, rs, zero);
        if (c == GreaterThanOrEqual) {
          as_xori(rd, rd, 1);
        }
        break;
      case Zero:
        as_sltiu(rd, rs, 1);
        break;
      case NonZero:
        as_sltu(rd, zero, rs);
        break;
      case Signed:
        as_slt(rd, rs, zero);
        break;
      case NotSigned:
        as_slt(rd, rs, zero);
        as_xori(rd, rd, 1);
        break;
      default:
        MOZ_CRASH("Invalid condition.");
    }
    return;
  }

  switch (c) {
    case Equal:
    case NotEqual:
      // rs == imm iff (rs ^ imm) == 0.
      ma_xor(rd, rs, imm);
      if (c == Equal) {
        as_sltiu(rd, rd, 1);
      } else {
        as_sltu(rd, zero, rd);
      }
      break;
    case Zero:
    case NonZero:
    case Signed:
    case NotSigned:
      MOZ_CRASH("Invalid condition.");
    default:
      // ma_cmp leaves 0/1 in rd with an Equal/NotEqual branch sense; flip the
      // value when the sense is Equal (i.e. condition holds when rd == 0).
      Condition cond = ma_cmp(rd, rs, imm, c);
      MOZ_ASSERT(cond == Equal || cond == NotEqual);

      if (cond == Equal) as_xori(rd, rd, 1);
  }
}
   1387 
   1388 // fp instructions
   1389 void MacroAssemblerMIPSShared::ma_lis(FloatRegister dest, float value) {
   1390  Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
   1391 
   1392  if (imm.value != 0) {
   1393    UseScratchRegisterScope temps(*this);
   1394    Register scratch = temps.Acquire();
   1395    ma_li(scratch, imm);
   1396    moveToFloat32(scratch, dest);
   1397  } else {
   1398    moveToFloat32(zero, dest);
   1399  }
   1400 }
   1401 
// Store double `ft` to base + index * scale + offset. On Loongson, gssdx can
// perform the indexed store directly when the offset fits in a signed 8-bit
// immediate; otherwise fold the scaled address into a scratch register and
// defer to the Address overload.
FaultingCodeOffset MacroAssemblerMIPSShared::ma_sd(FloatRegister ft,
                                                   BaseIndex address) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
    Register index = address.index;

    if (address.scale != TimesOne) {
      // Pre-scale the index into the scratch register.
      int32_t shift = Imm32::ShiftOf(address.scale).value;

      MOZ_ASSERT(scratch2 != address.base);
      index = scratch2;
      asMasm().ma_dsll(index, address.index, Imm32(shift));
    }

    // Record the offset of the store itself as the faulting instruction.
    FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
    as_gssdx(ft, address.base, index, address.offset);
    return fco;
  }

  asMasm().computeScaledAddress(address, scratch2);
  return asMasm().ma_sd(ft, Address(scratch2, address.offset));
}
   1425 
// Store single `ft` to base + index * scale + offset. Mirrors ma_sd, using
// the Loongson gsssx indexed store when the offset fits in 8 signed bits.
FaultingCodeOffset MacroAssemblerMIPSShared::ma_ss(FloatRegister ft,
                                                   BaseIndex address) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
    Register index = address.index;

    if (address.scale != TimesOne) {
      // Pre-scale the index into the scratch register.
      int32_t shift = Imm32::ShiftOf(address.scale).value;

      MOZ_ASSERT(scratch2 != address.base);
      index = scratch2;
      asMasm().ma_dsll(index, address.index, Imm32(shift));
    }

    // Record the offset of the store itself as the faulting instruction.
    FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
    as_gsssx(ft, address.base, index, address.offset);
    return fco;
  }

  asMasm().computeScaledAddress(address, scratch2);
  return asMasm().ma_ss(ft, Address(scratch2, address.offset));
}
   1449 
   1450 FaultingCodeOffset MacroAssemblerMIPSShared::ma_ld(FloatRegister ft,
   1451                                                   const BaseIndex& src) {
   1452  UseScratchRegisterScope temps(*this);
   1453  Register scratch2 = temps.Acquire();
   1454  asMasm().computeScaledAddress(src, scratch2);
   1455  return asMasm().ma_ld(ft, Address(scratch2, src.offset));
   1456 }
   1457 
   1458 FaultingCodeOffset MacroAssemblerMIPSShared::ma_ls(FloatRegister ft,
   1459                                                   const BaseIndex& src) {
   1460  UseScratchRegisterScope temps(*this);
   1461  Register scratch2 = temps.Acquire();
   1462  asMasm().computeScaledAddress(src, scratch2);
   1463  return asMasm().ma_ls(ft, Address(scratch2, src.offset));
   1464 }
   1465 
   1466 void MacroAssemblerMIPSShared::ma_bc1s(FloatRegister lhs, FloatRegister rhs,
   1467                                       Label* label, DoubleCondition c,
   1468                                       JumpKind jumpKind, FPConditionBit fcc) {
   1469  FloatTestKind testKind;
   1470  compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
   1471  asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
   1472 }
   1473 
   1474 void MacroAssemblerMIPSShared::ma_bc1d(FloatRegister lhs, FloatRegister rhs,
   1475                                       Label* label, DoubleCondition c,
   1476                                       JumpKind jumpKind, FPConditionBit fcc) {
   1477  FloatTestKind testKind;
   1478  compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
   1479  asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
   1480 }
   1481 
   1482 void MacroAssemblerMIPSShared::minMax32(Register lhs, Register rhs,
   1483                                        Register dest, bool isMax) {
   1484  if (rhs == dest) {
   1485    std::swap(lhs, rhs);
   1486  }
   1487 
   1488  auto cond = isMax ? Assembler::GreaterThan : Assembler::LessThan;
   1489  if (lhs != dest) {
   1490    asMasm().move32(lhs, dest);
   1491  }
   1492  asMasm().cmp32Move32(cond, rhs, lhs, rhs, dest);
   1493 }
   1494 
// dest = isMax ? max(lhs, imm) : min(lhs, imm) for a signed 32-bit immediate.
void MacroAssemblerMIPSShared::minMax32(Register lhs, Imm32 rhs, Register dest,
                                        bool isMax) {
  if (rhs.value == 0) {
    // Against zero, min/max reduce to a branch-free sign-mask trick.
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();

    if (isMax) {
      // dest = (~lhs >> 31) & lhs
      // The arithmetic shift spreads the inverted sign bit: the mask is
      // all-ones iff lhs >= 0, so negative values clamp to 0.
      as_nor(scratch, lhs, zero);
      as_sra(scratch, scratch, 31);
      as_and(dest, lhs, scratch);
    } else {
      // dest = (lhs >> 31) & lhs
      // The mask is all-ones iff lhs < 0, so positive values clamp to 0.
      as_sra(scratch, lhs, 31);
      as_and(dest, lhs, scratch);
    }
    return;
  }

  // General case: materialize the immediate and defer to the register form.
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  asMasm().move32(rhs, scratch);

  minMax32(lhs, scratch, dest, isMax);
}
   1520 
   1521 void MacroAssemblerMIPSShared::minMaxPtr(Register lhs, Register rhs,
   1522                                         Register dest, bool isMax) {
   1523  if (rhs == dest) {
   1524    std::swap(lhs, rhs);
   1525  }
   1526 
   1527  auto cond = isMax ? Assembler::GreaterThan : Assembler::LessThan;
   1528  if (lhs != dest) {
   1529    asMasm().movePtr(lhs, dest);
   1530  }
   1531  asMasm().cmpPtrMovePtr(cond, rhs, lhs, rhs, dest);
   1532 }
   1533 
// dest = isMax ? max(lhs, imm) : min(lhs, imm) for a pointer-sized immediate.
void MacroAssemblerMIPSShared::minMaxPtr(Register lhs, ImmWord rhs,
                                         Register dest, bool isMax) {
  if (rhs.value == 0) {
    // Against zero, min/max reduce to a branch-free sign-mask trick.
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();

    if (isMax) {
      // dest = (~lhs >> 63) & lhs
      // NOTE(review): presumably as_dsra32 takes the full shift amount
      // (32-63) and encodes sa - 32, so this shifts by 63 — confirm against
      // the assembler's dsra32 encoding.
      as_nor(scratch, lhs, zero);
      as_dsra32(scratch, scratch, 63);
      as_and(dest, lhs, scratch);
    } else {
      // dest = (lhs >> 63) & lhs
      as_dsra32(scratch, lhs, 63);
      as_and(dest, lhs, scratch);
    }
    return;
  }

  // General case: materialize the immediate and defer to the register form.
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  asMasm().movePtr(rhs, scratch);

  minMaxPtr(lhs, scratch, dest, isMax);
}
   1559 
// srcDest = isMax ? max(srcDest, second) : min(srcDest, second), double
// precision. If either operand is NaN the result is the generic NaN; the
// pre-R6 path also handles the -0/+0 distinction explicitly, since an
// ordered compare treats them as equal.
void MacroAssemblerMIPSShared::minMaxDouble(FloatRegister srcDest,
                                            FloatRegister second,
                                            bool handleNaN, bool isMax) {
  FloatRegister first = srcDest;

  Assembler::DoubleCondition cond = isMax ? Assembler::DoubleLessThanOrEqual
                                          : Assembler::DoubleGreaterThanOrEqual;
  Label nan, equal, done;
  FloatTestKind moveCondition;

  // First or second is NaN, result is NaN.
  ma_bc1d(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
#ifdef MIPSR6
  // R6 has native min/max instructions.
  if (isMax) {
    as_max(DoubleFloat, srcDest, first, second);
  } else {
    as_min(DoubleFloat, srcDest, first, second);
  }
#else
  // Make sure we handle -0 and 0 right.
  ma_bc1d(first, second, &equal, Assembler::DoubleEqual, ShortJump);
  // Operands differ: conditionally replace first with second when second is
  // the min/max (cond tests first <= / >= second).
  compareFloatingPoint(DoubleFloat, first, second, cond, &moveCondition);
  MOZ_ASSERT(TestForTrue == moveCondition);
  as_movt(DoubleFloat, first, second);
  ma_b(&done, ShortJump);

  // Check for zero.
  bind(&equal);
  asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
  compareFloatingPoint(DoubleFloat, first, ScratchDoubleReg,
                       Assembler::DoubleEqual, &moveCondition);

  // So now both operands are either -0 or 0.
  if (isMax) {
    // -0 + -0 = -0 and -0 + 0 = 0.
    as_addd(ScratchDoubleReg, first, second);
  } else {
    // -(-first - second) yields -0 unless both operands are +0.
    as_negd(ScratchDoubleReg, first);
    as_subd(ScratchDoubleReg, ScratchDoubleReg, second);
    as_negd(ScratchDoubleReg, ScratchDoubleReg);
  }
  MOZ_ASSERT(TestForTrue == moveCondition);
  // First is 0 or -0, move max/min to it, else just return it.
  as_movt(DoubleFloat, first, ScratchDoubleReg);
#endif
  ma_b(&done, ShortJump);

  bind(&nan);
  asMasm().loadConstantDouble(JS::GenericNaN(), srcDest);

  bind(&done);
}
   1612 
// srcDest = isMax ? max(srcDest, second) : min(srcDest, second), single
// precision. Mirrors minMaxDouble: NaN operands produce the generic NaN, and
// the pre-R6 path disambiguates -0 from +0 explicitly.
void MacroAssemblerMIPSShared::minMaxFloat32(FloatRegister srcDest,
                                             FloatRegister second,
                                             bool handleNaN, bool isMax) {
  FloatRegister first = srcDest;

  Assembler::DoubleCondition cond = isMax ? Assembler::DoubleLessThanOrEqual
                                          : Assembler::DoubleGreaterThanOrEqual;
  Label nan, equal, done;
  FloatTestKind moveCondition;

  // First or second is NaN, result is NaN.
  ma_bc1s(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
#ifdef MIPSR6
  // R6 has native min/max instructions.
  if (isMax) {
    as_max(SingleFloat, srcDest, first, second);
  } else {
    as_min(SingleFloat, srcDest, first, second);
  }
#else
  // Make sure we handle -0 and 0 right.
  ma_bc1s(first, second, &equal, Assembler::DoubleEqual, ShortJump);
  // Operands differ: conditionally replace first with second when second is
  // the min/max (cond tests first <= / >= second).
  compareFloatingPoint(SingleFloat, first, second, cond, &moveCondition);
  MOZ_ASSERT(TestForTrue == moveCondition);
  as_movt(SingleFloat, first, second);
  ma_b(&done, ShortJump);

  // Check for zero.
  bind(&equal);
  asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
  compareFloatingPoint(SingleFloat, first, ScratchFloat32Reg,
                       Assembler::DoubleEqual, &moveCondition);

  // So now both operands are either -0 or 0.
  if (isMax) {
    // -0 + -0 = -0 and -0 + 0 = 0.
    as_adds(ScratchFloat32Reg, first, second);
  } else {
    // -(-first - second) yields -0 unless both operands are +0.
    as_negs(ScratchFloat32Reg, first);
    as_subs(ScratchFloat32Reg, ScratchFloat32Reg, second);
    as_negs(ScratchFloat32Reg, ScratchFloat32Reg);
  }
  MOZ_ASSERT(TestForTrue == moveCondition);
  // First is 0 or -0, move max/min to it, else just return it.
  as_movt(SingleFloat, first, ScratchFloat32Reg);
#endif
  ma_b(&done, ShortJump);

  bind(&nan);
  asMasm().loadConstantFloat32(JS::GenericNaN(), srcDest);

  bind(&done);
}
   1665 
   1666 FaultingCodeOffset MacroAssemblerMIPSShared::loadDouble(const Address& address,
   1667                                                        FloatRegister dest) {
   1668  return asMasm().ma_ld(dest, address);
   1669 }
   1670 
   1671 FaultingCodeOffset MacroAssemblerMIPSShared::loadDouble(const BaseIndex& src,
   1672                                                        FloatRegister dest) {
   1673  return asMasm().ma_ld(dest, src);
   1674 }
   1675 
   1676 FaultingCodeOffset MacroAssemblerMIPSShared::loadFloat32(const Address& address,
   1677                                                         FloatRegister dest) {
   1678  return asMasm().ma_ls(dest, address);
   1679 }
   1680 
   1681 FaultingCodeOffset MacroAssemblerMIPSShared::loadFloat32(const BaseIndex& src,
   1682                                                         FloatRegister dest) {
   1683  return asMasm().ma_ls(dest, src);
   1684 }
   1685 
// Call through a patchable absolute address: ma_liPatchable emits a fixed
// instruction sequence so the target can be patched later.
void MacroAssemblerMIPSShared::ma_call(ImmPtr dest) {
  asMasm().ma_liPatchable(CallReg, dest);
  as_jalr(CallReg);
  as_nop();  // branch delay slot
}
   1691 
// Jump (no link) to a patchable absolute address via a scratch register.
void MacroAssemblerMIPSShared::ma_jump(ImmPtr dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  asMasm().ma_liPatchable(scratch, dest);
  as_jr(scratch);
  as_nop();  // branch delay slot
}
   1699 
   1700 MacroAssembler& MacroAssemblerMIPSShared::asMasm() {
   1701  return *static_cast<MacroAssembler*>(this);
   1702 }
   1703 
   1704 const MacroAssembler& MacroAssemblerMIPSShared::asMasm() const {
   1705  return *static_cast<const MacroAssembler*>(this);
   1706 }
   1707 
   1708 //{{{ check_macroassembler_style
   1709 // ===============================================================
   1710 // MacroAssembler high-level usage.
   1711 
// No-op on MIPS: there is no buffered state that needs flushing here.
void MacroAssembler::flush() {}
   1713 
   1714 // ===============================================================
   1715 // Stack manipulation functions.
   1716 
   1717 void MacroAssembler::Push(Register reg) {
   1718  push(reg);
   1719  adjustFrame(int32_t(sizeof(intptr_t)));
   1720 }
   1721 
   1722 void MacroAssembler::Push(const Imm32 imm) {
   1723  push(imm);
   1724  adjustFrame(int32_t(sizeof(intptr_t)));
   1725 }
   1726 
   1727 void MacroAssembler::Push(const ImmWord imm) {
   1728  push(imm);
   1729  adjustFrame(int32_t(sizeof(intptr_t)));
   1730 }
   1731 
   1732 void MacroAssembler::Push(const ImmPtr imm) {
   1733  Push(ImmWord(uintptr_t(imm.value)));
   1734 }
   1735 
   1736 void MacroAssembler::Push(const ImmGCPtr ptr) {
   1737  push(ptr);
   1738  adjustFrame(int32_t(sizeof(intptr_t)));
   1739 }
   1740 
   1741 void MacroAssembler::Push(FloatRegister f) {
   1742  push(f);
   1743  adjustFrame(int32_t(f.pushSize()));
   1744 }
   1745 
   1746 void MacroAssembler::Pop(Register reg) {
   1747  pop(reg);
   1748  adjustFrame(-int32_t(sizeof(intptr_t)));
   1749 }
   1750 
   1751 void MacroAssembler::Pop(FloatRegister f) {
   1752  pop(f);
   1753  adjustFrame(-int32_t(f.pushSize()));
   1754 }
   1755 
   1756 void MacroAssembler::Pop(const ValueOperand& val) {
   1757  popValue(val);
   1758  adjustFrame(-int32_t(sizeof(Value)));
   1759 }
   1760 
   1761 void MacroAssembler::PopStackPtr() {
   1762  loadPtr(Address(StackPointer, 0), StackPointer);
   1763  adjustFrame(-int32_t(sizeof(intptr_t)));
   1764 }
   1765 
   1766 // ===============================================================
   1767 // Simple call functions.
   1768 
// Indirect call through |reg|. Returns the offset just past the call
// sequence (the return address), which callers use for patching/metadata.
CodeOffset MacroAssembler::call(Register reg) {
 as_jalr(reg);
 as_nop();  // Fill the branch delay slot.
 return CodeOffset(currentOffset());
}
   1774 
// PC-relative call to |label|; returns the offset just past the call.
CodeOffset MacroAssembler::call(Label* label) {
 ma_bal(label);
 return CodeOffset(currentOffset());
}
   1779 
// Emit a call whose target is filled in later by patchCall(). The sequence
// embeds a 32-bit relative-offset word that is loaded at runtime, added to
// ra, and jumped through; patchCall() either rewrites the leading bal (if
// the target is in short-branch range) or rewrites the embedded word.
CodeOffset MacroAssembler::callWithPatch() {
 UseScratchRegisterScope temps(*this);
 as_bal(BOffImm16(3 * sizeof(uint32_t)));
 // Delay slot: bump ra past the remainder of the sequence so it holds the
 // real return address; the embedded word then sits at ra - 5 words.
 addPtr(Imm32(5 * sizeof(uint32_t)), ra);
 // Allocate space which will be patched by patchCall().
 spew(".space 32bit initValue 0xffff ffff");
 writeInst(UINT32_MAX);
 Register scratch = temps.Acquire();
 as_lw(scratch, ra, -(int32_t)(5 * sizeof(uint32_t)));
 addPtr(ra, scratch);  // scratch = return address + patched offset.
 as_jr(scratch);
 as_nop();  // Fill the branch delay slot.
 return CodeOffset(currentOffset());
}
   1794 
// Patch a call emitted by callWithPatch() (which is 7 instructions long,
// so the bal sits at callerOffset - 7 words). If the callee is within
// short-branch range, patch the bal directly; otherwise patch the embedded
// 32-bit word (at callerOffset - 5 words) with the offset relative to the
// return address.
void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
 BufferOffset call(callerOffset - 7 * sizeof(uint32_t));

 BOffImm16 offset = BufferOffset(calleeOffset).diffB<BOffImm16>(call);
 if (!offset.isInvalid()) {
   // Short range: turn the bal into a direct branch to the callee.
   InstImm* bal = (InstImm*)editSrc(call);
   bal->setBOffImm16(offset);
 } else {
   // Long range: store the callee's displacement from the return address
   // into the space reserved by callWithPatch().
   uint32_t u32Offset = callerOffset - 5 * sizeof(uint32_t);
   uint32_t* u32 =
       reinterpret_cast<uint32_t*>(editSrc(BufferOffset(u32Offset)));
   *u32 = calleeOffset - callerOffset;
 }
}
   1809 
// Emit an unbounded-range jump whose target is filled in later by
// patchFarJump(). A bal is used only to capture the address of the embedded
// offset word into ra; ra is saved around the sequence and restored in the
// final delay slot. Returns the offset of the patchable word.
CodeOffset MacroAssembler::farJumpWithPatch() {
 UseScratchRegisterScope temps(*this);
 Register scratch = temps.Acquire();
 ma_move(scratch, ra);  // Preserve ra; this is a jump, not a call.
 as_bal(BOffImm16(3 * sizeof(uint32_t)));
 Register scratch2 = temps.Acquire();
 // Delay slot: ra now points at the embedded word; load it.
 as_lw(scratch2, ra, 0);
 // Allocate space which will be patched by patchFarJump().
 CodeOffset farJump(currentOffset());
 spew(".space 32bit initValue 0xffff ffff");
 writeInst(UINT32_MAX);
 addPtr(ra, scratch2);  // scratch2 = word address + patched displacement.
 as_jr(scratch2);
 ma_move(ra, scratch);  // Delay slot: restore the original ra.
 return farJump;
}
   1826 
// Patch the offset word reserved by farJumpWithPatch() with the target's
// displacement from the word itself (still in the assembler buffer).
void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
 uint32_t* u32 =
     reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
 MOZ_ASSERT(*u32 == UINT32_MAX);  // Must still hold the placeholder.
 *u32 = targetOffset - farJump.offset();
}
   1833 
// Patch a far-jump offset word in already-emitted code: store the byte
// displacement from the word to |target|.
void MacroAssembler::patchFarJump(uint8_t* farJump, uint8_t* target) {
 uint32_t* u32 = reinterpret_cast<uint32_t*>(farJump);
 MOZ_ASSERT(*u32 == UINT32_MAX);  // Must still hold the placeholder.

 *u32 = (intptr_t)target - (intptr_t)farJump;
}
   1840 
   1841 CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
   1842  movePtr(target, CallReg);
   1843  return call(CallReg);
   1844 }
   1845 
   1846 CodeOffset MacroAssembler::call(const Address& addr) {
   1847  loadPtr(addr, CallReg);
   1848  return call(CallReg);
   1849 }
   1850 
   1851 void MacroAssembler::call(ImmWord target) { call(ImmPtr((void*)target.value)); }
   1852 
// Call an absolute address. Record a pending jump first (so the linker
// knows this hardcoded address needs relocation), then emit the call.
void MacroAssembler::call(ImmPtr target) {
 BufferOffset bo = m_buffer.nextOffset();
 addPendingJump(bo, target, RelocationKind::HARDCODED);
 ma_call(target);
}
   1858 
// Call into another piece of JIT code. Registers a JITCODE relocation for
// the target, materializes its entry point with a patchable immediate load,
// and calls through the JIT calling convention (no profiler frame).
void MacroAssembler::call(JitCode* c) {
 UseScratchRegisterScope temps(*this);
 BufferOffset bo = m_buffer.nextOffset();
 addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
 Register scratch = temps.Acquire();
 ma_liPatchable(scratch, ImmPtr(c->raw()));
 callJitNoProfiler(scratch);
}
   1867 
// Emit six nops that patchNopToCall() can later rewrite into a full
// load-64-bit-address + jalr call sequence (see the per-slot comments for
// the instruction each nop becomes). Returns the offset just past the
// sequence, which is what both patchers take as their anchor.
CodeOffset MacroAssembler::nopPatchableToCall() {
 as_nop();  // lui
 as_nop();  // ori
 as_nop();  // dsll
 as_nop();  // ori
 as_nop();  // jalr
 as_nop();  // delay slot
 return CodeOffset(currentOffset());
}
   1877 
// Turn a nopPatchableToCall() sequence into a real call: write the 64-bit
// address-load into the first four slots and a jalr (linking into ra) into
// the fifth; the sixth nop remains as the delay slot.
void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
 Instruction* inst = (Instruction*)call - 6 /* six nops */;
 Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)target);
 inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
}
   1883 
   1884 void MacroAssembler::patchCallToNop(uint8_t* call) {
   1885  Instruction* inst = (Instruction*)call - 6 /* six nops */;
   1886 
   1887  inst[0].makeNop();
   1888  inst[1].makeNop();
   1889  inst[2].makeNop();
   1890  inst[3].makeNop();
   1891  inst[4].makeNop();
   1892  inst[5].makeNop();
   1893 }
   1894 
// Emit a patchable 32-bit immediate move (lui/ori pair via ma_liPatchable)
// with a zero placeholder; returns the offset of the sequence for later
// patching by patchMove32().
CodeOffset MacroAssembler::move32WithPatch(Register dest) {
 CodeOffset offs = CodeOffset(currentOffset());
 ma_liPatchable(dest, Imm32(0));
 return offs;
}
   1900 
// Patch the immediate of a move32WithPatch() sequence.
// NOTE(review): this delegates to patchSub32FromStackPtr; presumably that
// patcher rewrites the same patchable lui/ori immediate pair — confirm it
// does not also assume a stack-pointer-relative subtract.
void MacroAssembler::patchMove32(CodeOffset offset, Imm32 n) {
 patchSub32FromStackPtr(offset, n);
}
   1904 
   1905 void MacroAssembler::pushReturnAddress() { push(ra); }
   1906 
   1907 void MacroAssembler::popReturnAddress() { pop(ra); }
   1908 
   1909 // ===============================================================
   1910 // Jit Frames.
   1911 
// Push the address of the instruction immediately following this sequence
// as if it were a return address, using a CodeLabel so the address is fixed
// up when the code is finalized. Returns the buffer offset of that point.
uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
 CodeLabel cl;

 ma_li(scratch, &cl);  // Materialize the (to-be-resolved) label address.
 Push(scratch);
 bind(&cl);  // The label resolves to the instruction right after the push.
 uint32_t retAddr = currentOffset();

 addCodeLabel(cl);
 return retAddr;
}
   1923 
// Given a GC pointer |ptr|, load the store buffer of its chunk into
// |buffer|: mask off the in-chunk bits to find the chunk base, then load
// the chunk's store-buffer field.
void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
 ma_and(buffer, ptr, Imm32(int32_t(~gc::ChunkMask)));
 loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
}
   1928 
// Branch to |label| depending on whether |ptr| lies in a nursery chunk.
// A chunk is a nursery chunk iff its store-buffer field is non-null, so we
// compute the chunk base and compare that field against null with the
// inverted condition (branchPtr jumps when the *inverse* test fails).
void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
                                            Register temp, Label* label) {
 MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
 MOZ_ASSERT(ptr != temp);

 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();

 // Chunk base = pointer with the in-chunk offset bits cleared.
 ma_and(scratch2, ptr, Imm32(int32_t(~gc::ChunkMask)));
 branchPtr(InvertCondition(cond),
           Address(scratch2, gc::ChunkStoreBufferOffset), ImmWord(0), label);
}
   1941 
   1942 void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
   1943 
   1944 // ===============================================================
   1945 // WebAssembly
   1946 
// Emit the canonical wasm trap instruction: a trap-if-equal on (zero, zero)
// — always taken — carrying the WASM_TRAP code. Returns its offset so the
// trap site can be registered.
FaultingCodeOffset MacroAssembler::wasmTrapInstruction() {
 FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
 as_teq(zero, zero, WASM_TRAP);
 return fco;
}
   1952 
// Truncate a double to int32 for wasm. Uses trunc.w.d, then inspects the
// FCSR CauseV (invalid-operation) bit: if the truncation was invalid (NaN
// or out of range), branch to the out-of-line path at |oolEntry|.
// |isSaturating| is handled entirely by the OOL path, hence unused here.
void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
                                              Register output,
                                              bool isSaturating,
                                              Label* oolEntry) {
 UseScratchRegisterScope temps(*this);
 as_truncwd(ScratchFloat32Reg, input);
 Register scratch = temps.Acquire();
 as_cfc1(scratch, Assembler::FCSR);  // Read FP status *after* the truncate.
 moveFromFloat32(ScratchFloat32Reg, output);
 ma_ext(scratch, scratch, Assembler::CauseV, 1);  // Isolate the CauseV bit.
 ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
}
   1965 
// Truncate a float32 to int32 for wasm; same structure as the double
// variant but using trunc.w.s. Invalid conversions (CauseV set) branch to
// the out-of-line path; saturation is handled there.
void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
                                               Register output,
                                               bool isSaturating,
                                               Label* oolEntry) {
 UseScratchRegisterScope temps(*this);
 as_truncws(ScratchFloat32Reg, input);
 Register scratch = temps.Acquire();
 as_cfc1(scratch, Assembler::FCSR);  // Read FP status *after* the truncate.
 moveFromFloat32(ScratchFloat32Reg, output);
 ma_ext(scratch, scratch, Assembler::CauseV, 1);  // Isolate the CauseV bit.
 ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
}
   1978 
   1979 void MacroAssembler::oolWasmTruncateCheckF32ToI32(
   1980    FloatRegister input, Register output, TruncFlags flags,
   1981    const wasm::TrapSiteDesc& trapSiteDesc, Label* rejoin) {
   1982  outOfLineWasmTruncateToInt32Check(input, output, MIRType::Float32, flags,
   1983                                    rejoin, trapSiteDesc);
   1984 }
   1985 
   1986 void MacroAssembler::oolWasmTruncateCheckF64ToI32(
   1987    FloatRegister input, Register output, TruncFlags flags,
   1988    const wasm::TrapSiteDesc& trapSiteDesc, Label* rejoin) {
   1989  outOfLineWasmTruncateToInt32Check(input, output, MIRType::Double, flags,
   1990                                    rejoin, trapSiteDesc);
   1991 }
   1992 
   1993 void MacroAssembler::oolWasmTruncateCheckF32ToI64(
   1994    FloatRegister input, Register64 output, TruncFlags flags,
   1995    const wasm::TrapSiteDesc& trapSiteDesc, Label* rejoin) {
   1996  outOfLineWasmTruncateToInt64Check(input, output, MIRType::Float32, flags,
   1997                                    rejoin, trapSiteDesc);
   1998 }
   1999 
   2000 void MacroAssembler::oolWasmTruncateCheckF64ToI64(
   2001    FloatRegister input, Register64 output, TruncFlags flags,
   2002    const wasm::TrapSiteDesc& trapSiteDesc, Label* rejoin) {
   2003  outOfLineWasmTruncateToInt64Check(input, output, MIRType::Double, flags,
   2004                                    rejoin, trapSiteDesc);
   2005 }
   2006 
// Out-of-line fixup for a wasm float/double -> int32 truncation whose
// inline fast path failed (NaN or overflow). Saturating truncations patch
// |output| to the saturated value and jump back to |rejoin|; non-saturating
// ones trap.
void MacroAssemblerMIPSShared::outOfLineWasmTruncateToInt32Check(
   FloatRegister input, Register output, MIRType fromType, TruncFlags flags,
   Label* rejoin, const wasm::TrapSiteDesc& trapSiteDesc) {
 bool isUnsigned = flags & TRUNC_UNSIGNED;
 bool isSaturating = flags & TRUNC_SATURATING;

 if (isSaturating) {
   // Load 0.0 of the matching width for the comparisons below.
   if (fromType == MIRType::Double) {
     asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
   } else {
     asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
   }

   if (isUnsigned) {
     // Default to UINT32_MAX (positive overflow); replace with 0 when the
     // input is below zero or NaN.
     ma_li(output, Imm32(UINT32_MAX));

     FloatTestKind moveCondition;
     compareFloatingPoint(
         fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
         fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
         Assembler::DoubleLessThanOrUnordered, &moveCondition);
     MOZ_ASSERT(moveCondition == TestForTrue);

     as_movt(output, zero);  // Conditional move on the FP condition bit.
   } else {
     // Positive overflow is already saturated to INT32_MAX, so we only have
     // to handle NaN and negative overflow here.

     // NaN saturates to 0.
     FloatTestKind moveCondition;
     compareFloatingPoint(
         fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
         Assembler::DoubleUnordered, &moveCondition);
     MOZ_ASSERT(moveCondition == TestForTrue);

     as_movt(output, zero);

     // Negative overflow (input < 0.0 after the NaN case) saturates to
     // INT32_MIN.
     compareFloatingPoint(
         fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
         fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
         Assembler::DoubleLessThan, &moveCondition);
     MOZ_ASSERT(moveCondition == TestForTrue);

     UseScratchRegisterScope temps(*this);
     Register scratch = temps.Acquire();
     ma_li(scratch, Imm32(INT32_MIN));
     as_movt(output, scratch);
   }

   MOZ_ASSERT(rejoin->bound());
   asMasm().jump(rejoin);
   return;
 }

 // Non-saturating: distinguish NaN from overflow so the right trap fires.
 Label inputIsNaN;

 if (fromType == MIRType::Double) {
   asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
                         &inputIsNaN);
 } else if (fromType == MIRType::Float32) {
   asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
 }

 asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapSiteDesc);
 asMasm().bind(&inputIsNaN);
 asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapSiteDesc);
}
   2073 
// Out-of-line fixup for a wasm float/double -> int64 truncation whose
// inline fast path failed; mirrors the int32 variant but saturates to the
// 64-bit bounds. Note the 64-bit result register (output_.reg) implies this
// saturating path is only meaningful where a Register holds 64 bits.
void MacroAssemblerMIPSShared::outOfLineWasmTruncateToInt64Check(
   FloatRegister input, Register64 output_, MIRType fromType, TruncFlags flags,
   Label* rejoin, const wasm::TrapSiteDesc& trapSiteDesc) {
 bool isUnsigned = flags & TRUNC_UNSIGNED;
 bool isSaturating = flags & TRUNC_SATURATING;

 if (isSaturating) {
   Register output = output_.reg;

   // Load 0.0 of the matching width for the comparisons below.
   if (fromType == MIRType::Double) {
     asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
   } else {
     asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
   }

   if (isUnsigned) {
     // Default to UINT64_MAX (positive overflow); replace with 0 when the
     // input is below zero or NaN.
     asMasm().ma_li(output, ImmWord(UINT64_MAX));

     FloatTestKind moveCondition;
     compareFloatingPoint(
         fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
         fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
         Assembler::DoubleLessThanOrUnordered, &moveCondition);
     MOZ_ASSERT(moveCondition == TestForTrue);

     as_movt(output, zero);  // Conditional move on the FP condition bit.

   } else {
     // Positive overflow is already saturated to INT64_MAX, so we only have
     // to handle NaN and negative overflow here.

     // NaN saturates to 0.
     FloatTestKind moveCondition;
     compareFloatingPoint(
         fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
         Assembler::DoubleUnordered, &moveCondition);
     MOZ_ASSERT(moveCondition == TestForTrue);

     as_movt(output, zero);

     // Negative overflow saturates to INT64_MIN.
     compareFloatingPoint(
         fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
         fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
         Assembler::DoubleLessThan, &moveCondition);
     MOZ_ASSERT(moveCondition == TestForTrue);

     UseScratchRegisterScope temps(*this);
     Register scratch = temps.Acquire();
     asMasm().ma_li(scratch, ImmWord(INT64_MIN));
     as_movt(output, scratch);
   }

   MOZ_ASSERT(rejoin->bound());
   asMasm().jump(rejoin);
   return;
 }

 // Non-saturating: distinguish NaN from overflow so the right trap fires.
 Label inputIsNaN;

 if (fromType == MIRType::Double) {
   asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
                         &inputIsNaN);
 } else if (fromType == MIRType::Float32) {
   asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
 }

 asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapSiteDesc);
 asMasm().bind(&inputIsNaN);
 asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapSiteDesc);
}
   2143 
   2144 void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
   2145                              Register memoryBase, Register ptr,
   2146                              Register ptrScratch, AnyRegister output) {
   2147  wasmLoadImpl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
   2148 }
   2149 
   2150 void MacroAssembler::wasmUnalignedLoad(const wasm::MemoryAccessDesc& access,
   2151                                       Register memoryBase, Register ptr,
   2152                                       Register ptrScratch, Register output,
   2153                                       Register tmp) {
   2154  wasmLoadImpl(access, memoryBase, ptr, ptrScratch, AnyRegister(output), tmp);
   2155 }
   2156 
   2157 void MacroAssembler::wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access,
   2158                                         Register memoryBase, Register ptr,
   2159                                         Register ptrScratch,
   2160                                         FloatRegister output, Register tmp1) {
   2161  wasmLoadImpl(access, memoryBase, ptr, ptrScratch, AnyRegister(output), tmp1);
   2162 }
   2163 
   2164 void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
   2165                               AnyRegister value, Register memoryBase,
   2166                               Register ptr, Register ptrScratch) {
   2167  wasmStoreImpl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
   2168 }
   2169 
   2170 void MacroAssembler::wasmUnalignedStore(const wasm::MemoryAccessDesc& access,
   2171                                        Register value, Register memoryBase,
   2172                                        Register ptr, Register ptrScratch,
   2173                                        Register tmp) {
   2174  wasmStoreImpl(access, AnyRegister(value), memoryBase, ptr, ptrScratch, tmp);
   2175 }
   2176 
   2177 void MacroAssembler::wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access,
   2178                                          FloatRegister floatValue,
   2179                                          Register memoryBase, Register ptr,
   2180                                          Register ptrScratch, Register tmp) {
   2181  wasmStoreImpl(access, AnyRegister(floatValue), memoryBase, ptr, ptrScratch,
   2182                tmp);
   2183 }
   2184 
// Shared implementation for all wasm loads. Folds the access offset into
// |ptrScratch|, then either emits an unaligned multi-instruction sequence
// (using |tmp|) or a single aligned load wrapped in the access's memory
// barriers, recording the faulting instruction offset for trap handling.
void MacroAssemblerMIPSShared::wasmLoadImpl(
   const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
   Register ptrScratch, AnyRegister output, Register tmp) {
 access.assertOffsetInGuardPages();
 uint32_t offset = access.offset32();
 MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

 // Maybe add the offset.
 if (offset) {
   asMasm().addPtr(ImmWord(offset), ptrScratch);
   ptr = ptrScratch;
 }

 unsigned byteSize = access.byteSize();
 bool isSigned = Scalar::isSignedIntType(access.type());
 bool isFloat = Scalar::isFloatingType(access.type());

 // SIMD load variants are not supported by this implementation.
 MOZ_ASSERT(!access.isZeroExtendSimd128Load());
 MOZ_ASSERT(!access.isSplatSimd128Load());
 MOZ_ASSERT(!access.isWidenSimd128Load());

 BaseIndex address(memoryBase, ptr, TimesOne);
 if (IsUnaligned(access)) {
   // Unaligned path: emit a byte/partial-word sequence; no single faulting
   // instruction is recorded here (the helpers handle their own metadata).
   MOZ_ASSERT(tmp != InvalidReg);
   if (isFloat) {
     if (byteSize == 4) {
       asMasm().loadUnalignedFloat32(access, address, tmp, output.fpu());
     } else {
       asMasm().loadUnalignedDouble(access, address, tmp, output.fpu());
     }
   } else {
     asMasm().ma_load_unaligned(access, output.gpr(), address, tmp,
                                static_cast<LoadStoreSize>(8 * byteSize),
                                isSigned ? SignExtend : ZeroExtend);
   }
   return;
 }

 asMasm().memoryBarrierBefore(access.sync());
 FaultingCodeOffset fco;
 if (isFloat) {
   if (byteSize == 4) {
     fco = asMasm().ma_ls(output.fpu(), address);
   } else {
     fco = asMasm().ma_ld(output.fpu(), address);
   }
 } else {
   fco = asMasm().ma_load(output.gpr(), address,
                          static_cast<LoadStoreSize>(8 * byteSize),
                          isSigned ? SignExtend : ZeroExtend);
 }
 // Register the load instruction as a potential trap site.
 asMasm().append(access,
                 wasm::TrapMachineInsnForLoad(Scalar::byteSize(access.type())),
                 fco);

 asMasm().memoryBarrierAfter(access.sync());
}
   2242 
// Shared implementation for all wasm stores; mirrors wasmLoadImpl: fold the
// offset, pick the aligned single-instruction store or the unaligned helper
// sequence, wrap with memory barriers, and record the trap site.
void MacroAssemblerMIPSShared::wasmStoreImpl(
   const wasm::MemoryAccessDesc& access, AnyRegister value,
   Register memoryBase, Register ptr, Register ptrScratch, Register tmp) {
 access.assertOffsetInGuardPages();
 uint32_t offset = access.offset32();
 MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

 // Maybe add the offset.
 if (offset) {
   asMasm().addPtr(ImmWord(offset), ptrScratch);
   ptr = ptrScratch;
 }

 unsigned byteSize = access.byteSize();
 bool isSigned = Scalar::isSignedIntType(access.type());
 bool isFloat = Scalar::isFloatingType(access.type());

 BaseIndex address(memoryBase, ptr, TimesOne);
 if (IsUnaligned(access)) {
   // Unaligned path: the helpers emit the multi-instruction sequence and
   // handle their own trap metadata.
   MOZ_ASSERT(tmp != InvalidReg);
   if (isFloat) {
     if (byteSize == 4) {
       asMasm().storeUnalignedFloat32(access, value.fpu(), tmp, address);
     } else {
       asMasm().storeUnalignedDouble(access, value.fpu(), tmp, address);
     }
   } else {
     asMasm().ma_store_unaligned(access, value.gpr(), address, tmp,
                                 static_cast<LoadStoreSize>(8 * byteSize),
                                 isSigned ? SignExtend : ZeroExtend);
   }
   return;
 }

 asMasm().memoryBarrierBefore(access.sync());
 // Only the last emitted instruction is a memory access.
 FaultingCodeOffset fco;
 if (isFloat) {
   if (byteSize == 4) {
     fco = asMasm().ma_ss(value.fpu(), address);
   } else {
     fco = asMasm().ma_sd(value.fpu(), address);
   }
 } else {
   fco = asMasm().ma_store(value.gpr(), address,
                           static_cast<LoadStoreSize>(8 * byteSize),
                           isSigned ? SignExtend : ZeroExtend);
 }
 // Register the store instruction as a potential trap site.
 asMasm().append(
     access, wasm::TrapMachineInsnForStore(Scalar::byteSize(access.type())),
     fco);
 asMasm().memoryBarrierAfter(access.sync());
}
   2296 
// Wasm uses the generic fake exit frame on MIPS; nothing extra is required.
void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
                                              ExitFrameType type) {
 enterFakeExitFrame(cxreg, scratch, type);
}
   2301 
// Load *address, subtract a later-patched 8-bit constant, store it back,
// and branch to |label| if the result went negative. Returns the offset
// used by patchSub32FromMemAndBranchIfNegative() to find the addiu.
CodeOffset MacroAssembler::sub32FromMemAndBranchIfNegativeWithPatch(
   Address address, Label* label) {
 UseScratchRegisterScope temps(*this);
 Register scratch = temps.Acquire();
 MOZ_ASSERT(scratch != address.base);
 ma_load(scratch, address);
 // mips doesn't have imm subtract insn, instead we use addiu rs, rt, -imm.
 // 128 is arbitrary, but makes `*address` count upwards, which may help
 // to identify cases where the subsequent ::patch..() call was forgotten.
 as_addiu(scratch, scratch, 128);
 // Points immediately after the insn to patch
 CodeOffset patchPoint = CodeOffset(currentOffset());
 ma_store(scratch, address);
 // Branch when the updated value is negative (sign bit set).
 ma_b(scratch, scratch, label, Assembler::Signed);
 return patchPoint;
}
   2318 
// Patch the placeholder addiu emitted by
// sub32FromMemAndBranchIfNegativeWithPatch() so it subtracts |imm|
// (encoded as addiu with a negated immediate). |offset| points just past
// the instruction, hence the -4.
void MacroAssembler::patchSub32FromMemAndBranchIfNegative(CodeOffset offset,
                                                         Imm32 imm) {
 int32_t val = imm.value;
 // Patching it to zero would make the insn pointless
 MOZ_RELEASE_ASSERT(val >= 1 && val <= 127);
 InstImm* inst = (InstImm*)m_buffer.getInst(BufferOffset(offset.offset() - 4));
 // mips doesn't have imm subtract insn, instead we use addiu rs, rt, -imm.
 // 31     25 20 15
 // |      |  |  |
 // 001001 rs rt imm = addiu rs, rt, imm
 MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_addiu >> OpcodeShift));
 inst->setImm16(Imm16(-val & 0xffff));
}
   2332 
   2333 // ========================================================================
   2334 // Primitive atomic operations.
   2335 
// Emit an atomic compare-and-exchange on a 1-, 2-, or 4-byte cell using an
// LL/SC retry loop. The 4-byte case maps directly onto ll/sc; narrower
// widths emulate a sub-word CAS by performing a word-sized ll/sc on the
// aligned containing word and masking/shifting the relevant lane (the
// *Temp registers are only used — and only allowed — in that path).
// |output| receives the old value, extended per the element type. When
// |access| is non-null, each ll is registered as a wasm trap site.
template <typename T>
static void CompareExchange(MacroAssembler& masm,
                           const wasm::MemoryAccessDesc* access,
                           Scalar::Type type, Synchronization sync,
                           const T& mem, Register oldval, Register newval,
                           Register valueTemp, Register offsetTemp,
                           Register maskTemp, Register output) {
 UseScratchRegisterScope temps(masm);

 bool signExtend = Scalar::isSignedIntType(type);
 unsigned nbytes = Scalar::byteSize(type);

 switch (nbytes) {
   case 1:
   case 2:
     break;
   case 4:
     // Word-sized CAS needs no lane temporaries.
     MOZ_ASSERT(valueTemp == InvalidReg);
     MOZ_ASSERT(offsetTemp == InvalidReg);
     MOZ_ASSERT(maskTemp == InvalidReg);
     break;
   default:
     MOZ_CRASH();
 }

 Label again, end;

 Register scratch2 = temps.Acquire();
 masm.computeEffectiveAddress(mem, scratch2);

 if (nbytes == 4) {
   Register scratch = temps.Acquire();

   masm.memoryBarrierBefore(sync);
   masm.bind(&again);

   if (access) {
     masm.append(*access, wasm::TrapMachineInsn::Load32,
                 FaultingCodeOffset(masm.currentOffset()));
   }

   // ll/sc loop: bail to |end| on mismatch, retry if the store-conditional
   // failed (sc wrote 0 into scratch).
   masm.as_ll(output, scratch2, 0);
   masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
   masm.ma_move(scratch, newval);
   masm.as_sc(scratch, scratch2, 0);
   masm.ma_b(scratch, scratch, &again, Assembler::Zero, ShortJump);

   masm.memoryBarrierAfter(sync);
   masm.bind(&end);

   return;
 }

 // Sub-word path: align scratch2 down to the containing word and compute
 // the lane's bit offset and an inverted lane mask.
 masm.as_andi(offsetTemp, scratch2, 3);
 masm.subPtr(offsetTemp, scratch2);
#if !MOZ_LITTLE_ENDIAN()
 // Big-endian: lane order within the word is reversed.
 masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
 masm.as_sll(offsetTemp, offsetTemp, 3);  // Byte offset -> bit offset.
 masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
 masm.as_sllv(maskTemp, maskTemp, offsetTemp);
 masm.as_nor(maskTemp, zero, maskTemp);  // Invert: mask of the OTHER lanes.

 masm.memoryBarrierBefore(sync);

 masm.bind(&again);

 if (access) {
   masm.append(*access, wasm::TrapMachineInsn::Load32,
               FaultingCodeOffset(masm.currentOffset()));
 }

 Register scratch = temps.Acquire();
 masm.as_ll(scratch, scratch2, 0);

 // Extract the addressed lane into |output|.
 masm.as_srlv(output, scratch, offsetTemp);

 // Normalize both the loaded lane and |oldval| to the element type so the
 // comparison below is well-defined.
 switch (nbytes) {
   case 1:
     if (signExtend) {
       masm.ma_seb(valueTemp, oldval);
       masm.ma_seb(output, output);
     } else {
       masm.as_andi(valueTemp, oldval, 0xff);
       masm.as_andi(output, output, 0xff);
     }
     break;
   case 2:
     if (signExtend) {
       masm.ma_seh(valueTemp, oldval);
       masm.ma_seh(output, output);
     } else {
       masm.as_andi(valueTemp, oldval, 0xffff);
       masm.as_andi(output, output, 0xffff);
     }
     break;
 }

 masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);

 // truncate newval for 8-bit and 16-bit cmpxchg
 switch (nbytes) {
   case 1:
     masm.as_andi(valueTemp, newval, 0xff);
     break;
   case 2:
     masm.as_andi(valueTemp, newval, 0xffff);
     break;
 }

 // Merge the new lane into the untouched lanes and attempt the store.
 masm.as_sllv(valueTemp, valueTemp, offsetTemp);
 masm.as_and(scratch, scratch, maskTemp);
 masm.as_or(scratch, scratch, valueTemp);

 masm.as_sc(scratch, scratch2, 0);

 // Retry if the store-conditional failed.
 masm.ma_b(scratch, scratch, &again, Assembler::Zero, ShortJump);

 masm.memoryBarrierAfter(sync);

 masm.bind(&end);
}
   2458 
   2459 void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
   2460                                     const Address& mem, Register oldval,
   2461                                     Register newval, Register valueTemp,
   2462                                     Register offsetTemp, Register maskTemp,
   2463                                     Register output) {
   2464  CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
   2465                  offsetTemp, maskTemp, output);
   2466 }
   2467 
   2468 void MacroAssembler::compareExchange(Scalar::Type type, Synchronization sync,
   2469                                     const BaseIndex& mem, Register oldval,
   2470                                     Register newval, Register valueTemp,
   2471                                     Register offsetTemp, Register maskTemp,
   2472                                     Register output) {
   2473  CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
   2474                  offsetTemp, maskTemp, output);
   2475 }
   2476 
   2477 void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
   2478                                         const Address& mem, Register oldval,
   2479                                         Register newval, Register valueTemp,
   2480                                         Register offsetTemp, Register maskTemp,
   2481                                         Register output) {
   2482  CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
   2483                  newval, valueTemp, offsetTemp, maskTemp, output);
   2484 }
   2485 
// Wasm compare-exchange on a BaseIndex: type/sync come from |access|, and
// trap metadata is recorded so a faulting LL can be caught.
void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
                                         const BaseIndex& mem, Register oldval,
                                         Register newval, Register valueTemp,
                                         Register offsetTemp, Register maskTemp,
                                         Register output) {
  CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
                  newval, valueTemp, offsetTemp, maskTemp, output);
}
   2494 
// Emit an atomic exchange on an 8-, 16- or 32-bit cell using an LL/SC retry
// loop.  The 32-bit case is a direct LL / move / SC loop; 8- and 16-bit
// cells are emulated with a 32-bit LL/SC on the naturally aligned word
// containing the cell, splicing the new value in with shift/mask ops
// (valueTemp/offsetTemp/maskTemp are only used in that case).  |output|
// receives the previous cell contents, sign- or zero-extended per |type|.
// If |access| is non-null, trap metadata is recorded for the LL so wasm can
// catch faulting accesses.
template <typename T>
static void AtomicExchange(MacroAssembler& masm,
                           const wasm::MemoryAccessDesc* access,
                           Scalar::Type type, Synchronization sync,
                           const T& mem, Register value, Register valueTemp,
                           Register offsetTemp, Register maskTemp,
                           Register output) {
  bool signExtend = Scalar::isSignedIntType(type);
  unsigned nbytes = Scalar::byteSize(type);

  switch (nbytes) {
    case 1:
    case 2:
      break;
    case 4:
      // The word-sized path needs no shift/mask temporaries.
      MOZ_ASSERT(valueTemp == InvalidReg);
      MOZ_ASSERT(offsetTemp == InvalidReg);
      MOZ_ASSERT(maskTemp == InvalidReg);
      break;
    default:
      MOZ_CRASH();
  }

  Label again;

  UseScratchRegisterScope temps(masm);
  Register scratch2 = temps.Acquire();
  masm.computeEffectiveAddress(mem, scratch2);

  if (nbytes == 4) {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    masm.memoryBarrierBefore(sync);
    masm.bind(&again);

    if (access) {
      masm.append(*access, wasm::TrapMachineInsn::Load32,
                  FaultingCodeOffset(masm.currentOffset()));
    }

    masm.as_ll(output, scratch2, 0);
    masm.ma_move(scratch, value);
    // SC leaves nonzero in scratch on success, zero if the reservation was
    // lost; retry on failure.
    masm.as_sc(scratch, scratch2, 0);
    masm.ma_b(scratch, scratch, &again, Assembler::Zero, ShortJump);

    masm.memoryBarrierAfter(sync);

    return;
  }

  // Subword case: align the address down to the containing word and compute
  // the cell's bit offset within that word.
  masm.as_andi(offsetTemp, scratch2, 3);
  masm.subPtr(offsetTemp, scratch2);
#if !MOZ_LITTLE_ENDIAN()
  // Byte order within the word is reversed on big-endian.
  masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
  masm.as_sll(offsetTemp, offsetTemp, 3);
  // maskTemp := NOT(cell mask shifted into position), i.e. the bits of the
  // word to preserve.
  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
  masm.as_nor(maskTemp, zero, maskTemp);
  // Truncate the new value to the cell width, then shift into position.
  switch (nbytes) {
    case 1:
      masm.as_andi(valueTemp, value, 0xff);
      break;
    case 2:
      masm.as_andi(valueTemp, value, 0xffff);
      break;
  }
  masm.as_sllv(valueTemp, valueTemp, offsetTemp);

  masm.memoryBarrierBefore(sync);

  masm.bind(&again);

  if (access) {
    masm.append(*access, wasm::TrapMachineInsn::Load32,
                FaultingCodeOffset(masm.currentOffset()));
  }

  Register scratch = temps.Acquire();
  masm.as_ll(output, scratch2, 0);
  // Keep the surrounding bytes and splice in the new cell value.
  masm.as_and(scratch, output, maskTemp);
  masm.as_or(scratch, scratch, valueTemp);

  masm.as_sc(scratch, scratch2, 0);

  masm.ma_b(scratch, scratch, &again, Assembler::Zero, ShortJump);

  // Extract the old cell value from the loaded word and extend it.
  masm.as_srlv(output, output, offsetTemp);

  switch (nbytes) {
    case 1:
      if (signExtend) {
        masm.ma_seb(output, output);
      } else {
        masm.as_andi(output, output, 0xff);
      }
      break;
    case 2:
      if (signExtend) {
        masm.ma_seh(output, output);
      } else {
        masm.as_andi(output, output, 0xffff);
      }
      break;
  }

  masm.memoryBarrierAfter(sync);
}
   2603 
// Plain (non-wasm) atomic exchange on an Address; no trap metadata.
void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
                                    const Address& mem, Register value,
                                    Register valueTemp, Register offsetTemp,
                                    Register maskTemp, Register output) {
  AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
                 maskTemp, output);
}
   2611 
// Plain (non-wasm) atomic exchange on a BaseIndex; no trap metadata.
void MacroAssembler::atomicExchange(Scalar::Type type, Synchronization sync,
                                    const BaseIndex& mem, Register value,
                                    Register valueTemp, Register offsetTemp,
                                    Register maskTemp, Register output) {
  AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
                 maskTemp, output);
}
   2619 
// Wasm atomic exchange on an Address; records trap metadata for the LL.
void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                                        const Address& mem, Register value,
                                        Register valueTemp, Register offsetTemp,
                                        Register maskTemp, Register output) {
  AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
                 valueTemp, offsetTemp, maskTemp, output);
}
   2627 
// Wasm atomic exchange on a BaseIndex; records trap metadata for the LL.
void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
                                        const BaseIndex& mem, Register value,
                                        Register valueTemp, Register offsetTemp,
                                        Register maskTemp, Register output) {
  AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
                 valueTemp, offsetTemp, maskTemp, output);
}
   2635 
// Emit an atomic read-modify-write (add/sub/and/or/xor) on an 8-, 16- or
// 32-bit cell via an LL/SC retry loop, returning the OLD value in |output|
// (sign- or zero-extended per |type|).  Subword cells are emulated with a
// 32-bit LL/SC on the aligned containing word plus shift/mask ops, using
// valueTemp/offsetTemp/maskTemp.  If |access| is non-null, trap metadata is
// recorded for the LL so wasm can catch faulting accesses.
template <typename T>
static void AtomicFetchOp(MacroAssembler& masm,
                          const wasm::MemoryAccessDesc* access,
                          Scalar::Type type, Synchronization sync, AtomicOp op,
                          const T& mem, Register value, Register valueTemp,
                          Register offsetTemp, Register maskTemp,
                          Register output) {
  bool signExtend = Scalar::isSignedIntType(type);
  unsigned nbytes = Scalar::byteSize(type);

  switch (nbytes) {
    case 1:
    case 2:
      break;
    case 4:
      // The word-sized path needs no shift/mask temporaries.
      MOZ_ASSERT(valueTemp == InvalidReg);
      MOZ_ASSERT(offsetTemp == InvalidReg);
      MOZ_ASSERT(maskTemp == InvalidReg);
      break;
    default:
      MOZ_CRASH();
  }

  Label again;

  UseScratchRegisterScope temps(masm);
  Register scratch2 = temps.Acquire();
  masm.computeEffectiveAddress(mem, scratch2);

  if (nbytes == 4) {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    masm.memoryBarrierBefore(sync);
    masm.bind(&again);

    if (access) {
      masm.append(*access, wasm::TrapMachineInsn::Load32,
                  FaultingCodeOffset(masm.currentOffset()));
    }

    // |output| holds the old value; |scratch| the value to store back.
    masm.as_ll(output, scratch2, 0);

    switch (op) {
      case AtomicOp::Add:
        masm.as_addu(scratch, output, value);
        break;
      case AtomicOp::Sub:
        masm.as_subu(scratch, output, value);
        break;
      case AtomicOp::And:
        masm.as_and(scratch, output, value);
        break;
      case AtomicOp::Or:
        masm.as_or(scratch, output, value);
        break;
      case AtomicOp::Xor:
        masm.as_xor(scratch, output, value);
        break;
      default:
        MOZ_CRASH();
    }

    // SC leaves nonzero on success, zero if the reservation was lost.
    masm.as_sc(scratch, scratch2, 0);
    masm.ma_b(scratch, scratch, &again, Assembler::Zero, ShortJump);

    masm.memoryBarrierAfter(sync);

    return;
  }

  // Subword case: align the address down to the containing word and compute
  // the cell's bit offset within that word.
  masm.as_andi(offsetTemp, scratch2, 3);
  masm.subPtr(offsetTemp, scratch2);
#if !MOZ_LITTLE_ENDIAN()
  // Byte order within the word is reversed on big-endian.
  masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
  masm.as_sll(offsetTemp, offsetTemp, 3);
  // maskTemp := NOT(cell mask shifted into position): bits to preserve.
  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
  masm.as_nor(maskTemp, zero, maskTemp);

  masm.memoryBarrierBefore(sync);

  masm.bind(&again);

  if (access) {
    masm.append(*access, wasm::TrapMachineInsn::Load32,
                FaultingCodeOffset(masm.currentOffset()));
  }

  // Load the whole word, extract the old cell value into |output|.
  Register scratch = temps.Acquire();
  masm.as_ll(scratch, scratch2, 0);
  masm.as_srlv(output, scratch, offsetTemp);

  switch (op) {
    case AtomicOp::Add:
      masm.as_addu(valueTemp, output, value);
      break;
    case AtomicOp::Sub:
      masm.as_subu(valueTemp, output, value);
      break;
    case AtomicOp::And:
      masm.as_and(valueTemp, output, value);
      break;
    case AtomicOp::Or:
      masm.as_or(valueTemp, output, value);
      break;
    case AtomicOp::Xor:
      masm.as_xor(valueTemp, output, value);
      break;
    default:
      MOZ_CRASH();
  }

  // Truncate the result to the cell width before reinserting it.
  switch (nbytes) {
    case 1:
      masm.as_andi(valueTemp, valueTemp, 0xff);
      break;
    case 2:
      masm.as_andi(valueTemp, valueTemp, 0xffff);
      break;
  }

  masm.as_sllv(valueTemp, valueTemp, offsetTemp);

  // Keep the surrounding bytes and splice in the updated cell value.
  masm.as_and(scratch, scratch, maskTemp);
  masm.as_or(scratch, scratch, valueTemp);

  masm.as_sc(scratch, scratch2, 0);

  masm.ma_b(scratch, scratch, &again, Assembler::Zero, ShortJump);

  // Extend the old cell value per the element type.
  switch (nbytes) {
    case 1:
      if (signExtend) {
        masm.ma_seb(output, output);
      } else {
        masm.as_andi(output, output, 0xff);
      }
      break;
    case 2:
      if (signExtend) {
        masm.ma_seh(output, output);
      } else {
        masm.as_andi(output, output, 0xffff);
      }
      break;
  }

  masm.memoryBarrierAfter(sync);
}
   2786 
// Plain (non-wasm) fetch-op on an Address; no trap metadata.
void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
                                   AtomicOp op, Register value,
                                   const Address& mem, Register valueTemp,
                                   Register offsetTemp, Register maskTemp,
                                   Register output) {
  AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
                offsetTemp, maskTemp, output);
}
   2795 
// Plain (non-wasm) fetch-op on a BaseIndex; no trap metadata.
void MacroAssembler::atomicFetchOp(Scalar::Type type, Synchronization sync,
                                   AtomicOp op, Register value,
                                   const BaseIndex& mem, Register valueTemp,
                                   Register offsetTemp, Register maskTemp,
                                   Register output) {
  AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
                offsetTemp, maskTemp, output);
}
   2804 
// Wasm fetch-op on an Address; records trap metadata for the LL.
void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
                                       AtomicOp op, Register value,
                                       const Address& mem, Register valueTemp,
                                       Register offsetTemp, Register maskTemp,
                                       Register output) {
  AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
                valueTemp, offsetTemp, maskTemp, output);
}
   2813 
// Wasm fetch-op on a BaseIndex; records trap metadata for the LL.
void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
                                       AtomicOp op, Register value,
                                       const BaseIndex& mem, Register valueTemp,
                                       Register offsetTemp, Register maskTemp,
                                       Register output) {
  AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
                valueTemp, offsetTemp, maskTemp, output);
}
   2822 
// Emit an atomic read-modify-write (add/sub/and/or/xor) on an 8-, 16- or
// 32-bit cell via an LL/SC retry loop, discarding the old value (effect-only
// variant of AtomicFetchOp; there is no |output| and no sign-extension
// step).  Subword cells are emulated with a 32-bit LL/SC on the aligned
// containing word plus shift/mask ops.  If |access| is non-null, trap
// metadata is recorded for the LL so wasm can catch faulting accesses.
template <typename T>
static void AtomicEffectOp(MacroAssembler& masm,
                           const wasm::MemoryAccessDesc* access,
                           Scalar::Type type, Synchronization sync, AtomicOp op,
                           const T& mem, Register value, Register valueTemp,
                           Register offsetTemp, Register maskTemp) {
  unsigned nbytes = Scalar::byteSize(type);

  switch (nbytes) {
    case 1:
    case 2:
      break;
    case 4:
      // The word-sized path needs no shift/mask temporaries.
      MOZ_ASSERT(valueTemp == InvalidReg);
      MOZ_ASSERT(offsetTemp == InvalidReg);
      MOZ_ASSERT(maskTemp == InvalidReg);
      break;
    default:
      MOZ_CRASH();
  }

  Label again;

  UseScratchRegisterScope temps(masm);
  Register scratch2 = temps.Acquire();
  masm.computeEffectiveAddress(mem, scratch2);

  if (nbytes == 4) {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    masm.memoryBarrierBefore(sync);
    masm.bind(&again);

    if (access) {
      masm.append(*access, wasm::TrapMachineInsn::Load32,
                  FaultingCodeOffset(masm.currentOffset()));
    }

    // Load, modify in place, and store back; old value is not kept.
    masm.as_ll(scratch, scratch2, 0);

    switch (op) {
      case AtomicOp::Add:
        masm.as_addu(scratch, scratch, value);
        break;
      case AtomicOp::Sub:
        masm.as_subu(scratch, scratch, value);
        break;
      case AtomicOp::And:
        masm.as_and(scratch, scratch, value);
        break;
      case AtomicOp::Or:
        masm.as_or(scratch, scratch, value);
        break;
      case AtomicOp::Xor:
        masm.as_xor(scratch, scratch, value);
        break;
      default:
        MOZ_CRASH();
    }

    // SC leaves nonzero on success, zero if the reservation was lost.
    masm.as_sc(scratch, scratch2, 0);
    masm.ma_b(scratch, scratch, &again, Assembler::Zero, ShortJump);

    masm.memoryBarrierAfter(sync);

    return;
  }

  // Subword case: align the address down to the containing word and compute
  // the cell's bit offset within that word.
  masm.as_andi(offsetTemp, scratch2, 3);
  masm.subPtr(offsetTemp, scratch2);
#if !MOZ_LITTLE_ENDIAN()
  // Byte order within the word is reversed on big-endian.
  masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
  masm.as_sll(offsetTemp, offsetTemp, 3);
  // maskTemp := NOT(cell mask shifted into position): bits to preserve.
  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
  masm.as_nor(maskTemp, zero, maskTemp);

  masm.memoryBarrierBefore(sync);

  masm.bind(&again);

  if (access) {
    masm.append(*access, wasm::TrapMachineInsn::Load32,
                FaultingCodeOffset(masm.currentOffset()));
  }

  // Load the whole word and extract the current cell value into valueTemp.
  Register scratch = temps.Acquire();
  masm.as_ll(scratch, scratch2, 0);
  masm.as_srlv(valueTemp, scratch, offsetTemp);

  switch (op) {
    case AtomicOp::Add:
      masm.as_addu(valueTemp, valueTemp, value);
      break;
    case AtomicOp::Sub:
      masm.as_subu(valueTemp, valueTemp, value);
      break;
    case AtomicOp::And:
      masm.as_and(valueTemp, valueTemp, value);
      break;
    case AtomicOp::Or:
      masm.as_or(valueTemp, valueTemp, value);
      break;
    case AtomicOp::Xor:
      masm.as_xor(valueTemp, valueTemp, value);
      break;
    default:
      MOZ_CRASH();
  }

  // Truncate the result to the cell width before reinserting it.
  switch (nbytes) {
    case 1:
      masm.as_andi(valueTemp, valueTemp, 0xff);
      break;
    case 2:
      masm.as_andi(valueTemp, valueTemp, 0xffff);
      break;
  }

  masm.as_sllv(valueTemp, valueTemp, offsetTemp);

  // Keep the surrounding bytes and splice in the updated cell value.
  masm.as_and(scratch, scratch, maskTemp);
  masm.as_or(scratch, scratch, valueTemp);

  masm.as_sc(scratch, scratch2, 0);

  masm.ma_b(scratch, scratch, &again, Assembler::Zero, ShortJump);

  masm.memoryBarrierAfter(sync);
}
   2954 
// Wasm effect-only atomic op on an Address; records trap metadata.
void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
                                        AtomicOp op, Register value,
                                        const Address& mem, Register valueTemp,
                                        Register offsetTemp,
                                        Register maskTemp) {
  AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
                 valueTemp, offsetTemp, maskTemp);
}
   2963 
// Wasm effect-only atomic op on a BaseIndex; records trap metadata.
void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
                                        AtomicOp op, Register value,
                                        const BaseIndex& mem,
                                        Register valueTemp, Register offsetTemp,
                                        Register maskTemp) {
  AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
                 valueTemp, offsetTemp, maskTemp);
}
   2972 
   2973 // ========================================================================
   2974 // JS atomic operations.
   2975 
// JS typed-array compare-exchange.  Uint32 results may exceed INT32_MAX, so
// they are produced in |temp| and converted to a double in output.fpu();
// all other element types go straight to output.gpr().
template <typename T>
static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
                              Synchronization sync, const T& mem,
                              Register oldval, Register newval,
                              Register valueTemp, Register offsetTemp,
                              Register maskTemp, Register temp,
                              AnyRegister output) {
  if (arrayType == Scalar::Uint32) {
    masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
                         offsetTemp, maskTemp, temp);
    masm.convertUInt32ToDouble(temp, output.fpu());
  } else {
    masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
                         offsetTemp, maskTemp, output.gpr());
  }
}
   2992 
// JS compare-exchange on an Address; see CompareExchangeJS for the
// Uint32-to-double handling.
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
                                       Synchronization sync, const Address& mem,
                                       Register oldval, Register newval,
                                       Register valueTemp, Register offsetTemp,
                                       Register maskTemp, Register temp,
                                       AnyRegister output) {
  CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
                    offsetTemp, maskTemp, temp, output);
}
   3002 
// JS compare-exchange on a BaseIndex; see CompareExchangeJS for the
// Uint32-to-double handling.
void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
                                       Synchronization sync,
                                       const BaseIndex& mem, Register oldval,
                                       Register newval, Register valueTemp,
                                       Register offsetTemp, Register maskTemp,
                                       Register temp, AnyRegister output) {
  CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
                    offsetTemp, maskTemp, temp, output);
}
   3012 
// JS typed-array atomic exchange.  Uint32 results may exceed INT32_MAX, so
// they are produced in |temp| and converted to a double in output.fpu();
// all other element types go straight to output.gpr().
template <typename T>
static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
                             Synchronization sync, const T& mem, Register value,
                             Register valueTemp, Register offsetTemp,
                             Register maskTemp, Register temp,
                             AnyRegister output) {
  if (arrayType == Scalar::Uint32) {
    masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
                        maskTemp, temp);
    masm.convertUInt32ToDouble(temp, output.fpu());
  } else {
    masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
                        maskTemp, output.gpr());
  }
}
   3028 
// JS atomic exchange on an Address; see AtomicExchangeJS for the
// Uint32-to-double handling.
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
                                      Synchronization sync, const Address& mem,
                                      Register value, Register valueTemp,
                                      Register offsetTemp, Register maskTemp,
                                      Register temp, AnyRegister output) {
  AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
                   maskTemp, temp, output);
}
   3037 
// JS atomic exchange on a BaseIndex; see AtomicExchangeJS for the
// Uint32-to-double handling.
void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
                                      Synchronization sync,
                                      const BaseIndex& mem, Register value,
                                      Register valueTemp, Register offsetTemp,
                                      Register maskTemp, Register temp,
                                      AnyRegister output) {
  AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
                   maskTemp, temp, output);
}
   3047 
   3048 template <typename T>
   3049 static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
   3050                            Synchronization sync, AtomicOp op, Register value,
   3051                            const T& mem, Register valueTemp,
   3052                            Register offsetTemp, Register maskTemp,
   3053                            Register temp, AnyRegister output) {
   3054  if (arrayType == Scalar::Uint32) {
   3055    masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
   3056                       maskTemp, temp);
   3057    masm.convertUInt32ToDouble(temp, output.fpu());
   3058  } else {
   3059    masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
   3060                       maskTemp, output.gpr());
   3061  }
   3062 }
   3063 
// JS fetch-op on an Address; see AtomicFetchOpJS for the Uint32-to-double
// handling.
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
                                     Synchronization sync, AtomicOp op,
                                     Register value, const Address& mem,
                                     Register valueTemp, Register offsetTemp,
                                     Register maskTemp, Register temp,
                                     AnyRegister output) {
  AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
                  maskTemp, temp, output);
}
   3073 
// JS fetch-op on a BaseIndex; see AtomicFetchOpJS for the Uint32-to-double
// handling.
void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
                                     Synchronization sync, AtomicOp op,
                                     Register value, const BaseIndex& mem,
                                     Register valueTemp, Register offsetTemp,
                                     Register maskTemp, Register temp,
                                     AnyRegister output) {
  AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
                  maskTemp, temp, output);
}
   3083 
// JS effect-only atomic op on a BaseIndex (result discarded, so no
// Uint32-to-double conversion is needed); no trap metadata.
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
                                      Synchronization sync, AtomicOp op,
                                      Register value, const BaseIndex& mem,
                                      Register valueTemp, Register offsetTemp,
                                      Register maskTemp) {
  AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
                 offsetTemp, maskTemp);
}
   3092 
// JS effect-only atomic op on an Address (result discarded); no trap
// metadata.
void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
                                      Synchronization sync, AtomicOp op,
                                      Register value, const Address& mem,
                                      Register valueTemp, Register offsetTemp,
                                      Register maskTemp) {
  AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
                 offsetTemp, maskTemp);
}
   3101 
   3102 void MacroAssembler::atomicPause() { as_sync(); }
   3103 
// 32-bit quotient; the LiveRegisterSet is unused because quotient32 needs
// no extra spills on MIPS.
void MacroAssembler::flexibleQuotient32(Register lhs, Register rhs,
                                        Register dest, bool isUnsigned,
                                        const LiveRegisterSet&) {
  quotient32(lhs, rhs, dest, isUnsigned);
}
   3109 
// 32-bit remainder; the LiveRegisterSet is unused because remainder32 needs
// no extra spills on MIPS.
void MacroAssembler::flexibleRemainder32(Register lhs, Register rhs,
                                         Register dest, bool isUnsigned,
                                         const LiveRegisterSet&) {
  remainder32(lhs, rhs, dest, isUnsigned);
}
   3115 
   3116 void MacroAssembler::flexibleDivMod32(Register lhs, Register rhs,
   3117                                      Register divOutput, Register remOutput,
   3118                                      bool isUnsigned, const LiveRegisterSet&) {
   3119  MOZ_ASSERT(lhs != divOutput && lhs != remOutput, "lhs is preserved");
   3120  MOZ_ASSERT(rhs != divOutput && rhs != remOutput, "rhs is preserved");
   3121 
   3122 #ifdef MIPSR6
   3123  if (isUnsigned) {
   3124    as_divu(divOutput, lhs, rhs);
   3125    as_modu(remOutput, rhs, rhs);
   3126  } else {
   3127    as_div(divOutput, lhs, rhs);
   3128    as_mod(remOutput, lhs, rhs);
   3129  }
   3130 #else
   3131  if (isUnsigned) {
   3132    as_divu(lhs, rhs);
   3133  } else {
   3134    as_div(lhs, rhs);
   3135  }
   3136  as_mfhi(remOutput);
   3137  as_mflo(divOutput);
   3138 #endif
   3139 }
   3140 
// Emit a patchable address load (filled with nullptr until patched) and
// return its offset for later patching.
CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
  return movWithPatch(ImmPtr(nullptr), dest);
}
   3144 
// Patch a moveNearAddressWithPatch site with the real target address; the
// expected old value (nullptr) is checked by PatchDataWithValueCheck.
void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
                                          CodeLocationLabel target) {
  PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
}
   3149 
   3150 // ========================================================================
   3151 // Spectre Mitigations.
   3152 
   3153 void MacroAssembler::speculationBarrier() { MOZ_CRASH(); }
   3154 
// floor(float32) -> int32, branching to |fail| if the result does not fit
// in an int32 or if the input is -0 or NaN.
void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
                                         Label* fail) {
  ScratchFloat32Scope fscratch(*this);

  // Round toward negative infinity.  floor.l.s produces a 64-bit integer
  // result, hence the moveFromDouble below.
  as_floorls(fscratch, src);
  moveFromDouble(fscratch, dest);

  // Sign extend lower 32 bits to test if the result isn't an Int32.
  {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();

    move32SignExtendToPtr(dest, scratch);
    branchPtr(Assembler::NotEqual, dest, scratch, fail);
  }

  // We have to check for -0 and NaN when the result is zero.
  Label notZero;
  ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
  {
    // If any of the two most significant bits is set, |src| is -0 or NaN.
    moveFromFloat32(src, dest);
    ma_srl(dest, dest, Imm32(30));
    branch32(Assembler::NotEqual, dest, zero, fail);
  }
  bind(&notZero);
}
   3183 
// floor(double) -> int32, branching to |fail| if the result does not fit
// in an int32 or if the input is -0 or NaN.
void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
                                        Label* fail) {
  ScratchDoubleScope dscratch(*this);

  // Round toward negative infinity (64-bit integer result).
  as_floorld(dscratch, src);
  moveFromDouble(dscratch, dest);

  // Sign extend lower 32 bits to test if the result isn't an Int32.
  {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();

    move32SignExtendToPtr(dest, scratch);
    branchPtr(Assembler::NotEqual, dest, scratch, fail);
  }

  // We have to check for -0 and NaN when the result is zero.
  Label notZero;
  ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
  {
    // If any of the two most significant bits is set, |src| is -0 or NaN.
    moveFromDouble(src, dest);
    ma_dsrl(dest, dest, Imm32(62));
    branchPtr(Assembler::NotEqual, dest, zero, fail);
  }
  bind(&notZero);
}
   3212 
// ceil(float32) -> int32, branching to |fail| if the result does not fit in
// an int32 or if the input is in (-1, -0] or NaN (which would yield -0).
void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
                                        Label* fail) {
  ScratchFloat32Scope fscratch(*this);

  // Round toward positive infinity.  ceil.l.s produces a 64-bit integer
  // result, hence the moveFromDouble below.
  as_ceills(fscratch, src);
  moveFromDouble(fscratch, dest);

  // Sign extend lower 32 bits to test if the result isn't an Int32.
  {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();

    move32SignExtendToPtr(dest, scratch);
    branchPtr(Assembler::NotEqual, dest, scratch, fail);
  }

  // We have to check for (-1, -0] and NaN when the result is zero.
  Label notZero;
  ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
  {
    // If binary value is not zero, the input was not 0, so we bail.
    moveFromFloat32(src, dest);
    branch32(Assembler::NotEqual, dest, zero, fail);
  }
  bind(&notZero);
}
   3240 
// ceil(double) -> int32, branching to |fail| if the result does not fit in
// an int32 or if the input is in (-1, -0] or NaN (which would yield -0).
void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
                                       Label* fail) {
  ScratchDoubleScope dscratch(*this);

  // Round toward positive infinity (64-bit integer result).
  as_ceilld(dscratch, src);
  moveFromDouble(dscratch, dest);

  // Sign extend lower 32 bits to test if the result isn't an Int32.
  {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();

    move32SignExtendToPtr(dest, scratch);
    branchPtr(Assembler::NotEqual, dest, scratch, fail);
  }

  // We have to check for (-1, -0] and NaN when the result is zero.
  Label notZero;
  ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
  {
    // If binary value is not zero, the input was not 0, so we bail.
    moveFromDouble(src, dest);
    branchPtr(Assembler::NotEqual, dest, zero, fail);
  }
  bind(&notZero);
}
   3268 
// Rounds |src| to the nearest Int32, resolving ties toward positive infinity
// (add biggest-float-below-0.5, then floor). Jumps to |fail| when the result
// does not fit in an Int32, when the input is NaN or -0, or when the input
// would round to -0. |temp| is clobbered with the rounding constant.
void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
                                         FloatRegister temp, Label* fail) {
  ScratchFloat32Scope fscratch(*this);

  Label negative, end, performRound;

  // Branch for negative inputs. Doesn't catch NaN or -0 (comparisons with
  // NaN are false, and -0 < +0 is false).
  loadConstantFloat32(0.0f, fscratch);
  ma_bc1s(src, fscratch, &negative, Assembler::DoubleLessThan, ShortJump);

  // If non-negative check for bailout.
  ma_bc1s(src, fscratch, &performRound, Assembler::DoubleNotEqual, ShortJump);
  {
    // Reached only when |src| is not < 0 and not != 0, i.e. it compared
    // equal to zero or is NaN. If binary value is not zero, it is NaN or -0,
    // so we bail; otherwise the input is +0 and dest already holds 0.
    moveFromFloat32(src, dest);
    branch32(Assembler::NotEqual, dest, zero, fail);
    ma_b(&end, ShortJump);
  }

  // Input is negative, but isn't -0.
  bind(&negative);
  {
    // Inputs in [-0.5, 0) are rounded to -0. Fail.
    loadConstantFloat32(-0.5f, fscratch);
    branchFloat(Assembler::DoubleGreaterThanOrEqual, src, fscratch, fail);
  }

  bind(&performRound);
  {
    // Load biggest number less than 0.5 in the temp register.
    loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);

    // Other inputs need the biggest float less than 0.5 added; adding
    // exactly 0.5 would round half-way cases the wrong way after floor.
    as_adds(fscratch, src, temp);

    // Round toward negative infinity; floor.l.s yields a 64-bit integer
    // in the FPR, read out as a full 64-bit value.
    as_floorls(fscratch, fscratch);
    moveFromDouble(fscratch, dest);

    // Sign extend lower 32 bits to test if the result isn't an Int32.
    {
      UseScratchRegisterScope temps(*this);
      Register scratch = temps.Acquire();

      move32SignExtendToPtr(dest, scratch);
      branchPtr(Assembler::NotEqual, dest, scratch, fail);
    }
  }
  bind(&end);
}
   3319 
// Rounds |src| to the nearest Int32, resolving ties toward positive infinity
// (add biggest-double-below-0.5, then floor). Jumps to |fail| when the result
// does not fit in an Int32, when the input is NaN or -0, or when the input
// would round to -0. |temp| is clobbered with the rounding constant.
void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
                                        FloatRegister temp, Label* fail) {
  ScratchDoubleScope dscratch(*this);

  Label negative, end, performRound;

  // Branch for negative inputs. Doesn't catch NaN or -0 (comparisons with
  // NaN are false, and -0 < +0 is false).
  loadConstantDouble(0.0, dscratch);
  ma_bc1d(src, dscratch, &negative, Assembler::DoubleLessThan, ShortJump);

  // If non-negative check for bailout.
  ma_bc1d(src, dscratch, &performRound, Assembler::DoubleNotEqual, ShortJump);
  {
    // Reached only when |src| is not < 0 and not != 0, i.e. it compared
    // equal to zero or is NaN. If binary value is not zero, it is NaN or -0,
    // so we bail; otherwise the input is +0 and dest already holds 0.
    moveFromDouble(src, dest);
    branchPtr(Assembler::NotEqual, dest, zero, fail);
    ma_b(&end, ShortJump);
  }

  // Input is negative, but isn't -0.
  bind(&negative);
  {
    // Inputs in [-0.5, 0) are rounded to -0. Fail.
    loadConstantDouble(-0.5, dscratch);
    branchDouble(Assembler::DoubleGreaterThanOrEqual, src, dscratch, fail);
  }

  bind(&performRound);
  {
    // Load biggest number less than 0.5 in the temp register.
    loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);

    // Other inputs need the biggest double less than 0.5 added; adding
    // exactly 0.5 would round half-way cases the wrong way after floor.
    as_addd(dscratch, src, temp);

    // Round toward negative infinity; floor.l.d yields a 64-bit integer
    // in the FPR, read out as a full 64-bit value.
    as_floorld(dscratch, dscratch);
    moveFromDouble(dscratch, dest);

    // Sign extend lower 32 bits to test if the result isn't an Int32.
    {
      UseScratchRegisterScope temps(*this);
      Register scratch = temps.Acquire();

      move32SignExtendToPtr(dest, scratch);
      branchPtr(Assembler::NotEqual, dest, scratch, fail);
    }
  }
  bind(&end);
}
   3370 
   3371 void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
   3372                                         Label* fail) {
   3373  ScratchFloat32Scope fscratch(*this);
   3374 
   3375  // Round toward zero.
   3376  as_truncls(fscratch, src);
   3377  moveFromDouble(fscratch, dest);
   3378 
   3379  // Sign extend lower 32 bits to test if the result isn't an Int32.
   3380  {
   3381    UseScratchRegisterScope temps(*this);
   3382    Register scratch = temps.Acquire();
   3383 
   3384    move32SignExtendToPtr(dest, scratch);
   3385    branchPtr(Assembler::NotEqual, dest, scratch, fail);
   3386  }
   3387 
   3388  // We have to check for (-1, -0] and NaN when the result is zero.
   3389  Label notZero;
   3390  ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
   3391  {
   3392    // If any of the two most significant bits is set, |src| is negative or NaN.
   3393    moveFromFloat32(src, dest);
   3394    ma_srl(dest, dest, Imm32(30));
   3395    branch32(Assembler::NotEqual, dest, zero, fail);
   3396  }
   3397  bind(&notZero);
   3398 }
   3399 
   3400 void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
   3401                                        Label* fail) {
   3402  ScratchDoubleScope dscratch(*this);
   3403 
   3404  // Round toward zero.
   3405  as_truncld(dscratch, src);
   3406  moveFromDouble(dscratch, dest);
   3407 
   3408  // Sign extend lower 32 bits to test if the result isn't an Int32.
   3409  {
   3410    UseScratchRegisterScope temps(*this);
   3411    Register scratch = temps.Acquire();
   3412 
   3413    move32SignExtendToPtr(dest, scratch);
   3414    branchPtr(Assembler::NotEqual, dest, scratch, fail);
   3415  }
   3416 
   3417  // We have to check for (-1, -0] and NaN when the result is zero.
   3418  Label notZero;
   3419  ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
   3420  {
   3421    // If any of the two most significant bits is set, |src| is negative or NaN.
   3422    moveFromDouble(src, dest);
   3423    ma_dsrl(dest, dest, Imm32(62));
   3424    branchPtr(Assembler::NotEqual, dest, zero, fail);
   3425  }
   3426  bind(&notZero);
   3427 }
   3428 
// nearbyint(double) has no implementation on MIPS; codegen must not select
// this operation for this platform.
void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
                                     FloatRegister dest) {
  MOZ_CRASH("not supported on this platform");
}
   3433 
// nearbyint(float32) has no implementation on MIPS; codegen must not select
// this operation for this platform.
void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
                                      FloatRegister dest) {
  MOZ_CRASH("not supported on this platform");
}
   3438 
   3439 void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
   3440                                    FloatRegister output) {
   3441  UseScratchRegisterScope temps(*this);
   3442  Register lhsi = temps.Acquire();
   3443  Register rhsi = temps.Acquire();
   3444 
   3445  moveFromDouble(lhs, lhsi);
   3446  moveFromDouble(rhs, rhsi);
   3447 
   3448  // Combine.
   3449  if (hasR2()) {
   3450    ma_dins(rhsi, lhsi, Imm32(0), Imm32(63));
   3451  } else {
   3452    ma_dext(lhsi, lhsi, Imm32(0), Imm32(63));
   3453    ma_dsrl(rhsi, rhsi, Imm32(63));
   3454    ma_dsll(rhsi, rhsi, Imm32(63));
   3455    as_or(rhsi, rhsi, lhsi);
   3456  }
   3457  moveToDouble(rhsi, output);
   3458 }
   3459 
   3460 void MacroAssembler::copySignFloat32(FloatRegister lhs, FloatRegister rhs,
   3461                                     FloatRegister output) {
   3462  UseScratchRegisterScope temps(*this);
   3463  Register lhsi = temps.Acquire();
   3464  Register rhsi = temps.Acquire();
   3465 
   3466  moveFromFloat32(lhs, lhsi);
   3467  moveFromFloat32(rhs, rhsi);
   3468 
   3469  // Combine.
   3470  if (hasR2()) {
   3471    ma_ins(rhsi, lhsi, 0, 31);
   3472  } else {
   3473    ma_ext(lhsi, lhsi, 0, 31);
   3474    ma_srl(rhsi, rhsi, Imm32(31));
   3475    ma_sll(rhsi, rhsi, Imm32(31));
   3476    as_or(rhsi, rhsi, lhsi);
   3477  }
   3478  moveToFloat32(rhsi, output);
   3479 }
   3480 
   3481 void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
   3482                                        Register pointer) {
   3483  if (IsShiftInScaleRange(shift)) {
   3484    computeEffectiveAddress(
   3485        BaseIndex(pointer, indexTemp32, ShiftToScale(shift)), pointer);
   3486    return;
   3487  }
   3488  lshift32(Imm32(shift), indexTemp32);
   3489  addPtr(indexTemp32, pointer);
   3490 }
   3491 
   3492 //}}} check_macroassembler_style