tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MacroAssembler-mips64.cpp (115210B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/mips64/MacroAssembler-mips64.h"
      8 
      9 #include "mozilla/DebugOnly.h"
     10 #include "mozilla/MathAlgorithms.h"
     11 
     12 #include "jit/Bailouts.h"
     13 #include "jit/BaselineFrame.h"
     14 #include "jit/JitFrames.h"
     15 #include "jit/JitRuntime.h"
     16 #include "jit/MacroAssembler.h"
     17 #include "jit/mips64/Simulator-mips64.h"
     18 #include "jit/MoveEmitter.h"
     19 #include "jit/SharedICRegisters.h"
     20 #include "util/Memory.h"
     21 #include "vm/JitActivation.h"  // js::jit::JitActivation
     22 #include "vm/JSContext.h"
     23 #include "wasm/WasmStubs.h"
     24 
     25 #include "jit/MacroAssembler-inl.h"
     26 
     27 using namespace js;
     28 using namespace jit;
     29 
     30 using mozilla::Abs;
     31 
     32 static_assert(sizeof(intptr_t) == 8, "Not 32-bit clean.");
     33 
// Normalize a C++ bool value to a canonical int32 0/1 in |dest|.
void MacroAssemblerMIPS64Compat::convertBoolToInt32(Register src,
                                                    Register dest) {
  // Note that C++ bool is only 1 byte, so zero extend it to clear the
  // higher-order bits.
  ma_and(dest, src, Imm32(0xff));
}
     40 
// Convert a signed 32-bit integer held in a GPR to a double.
void MacroAssemblerMIPS64Compat::convertInt32ToDouble(Register src,
                                                      FloatRegister dest) {
  as_mtc1(src, dest);    // move the int32 bits into the FPU register
  as_cvtdw(dest, dest);  // convert word -> double
}

// As above, but the int32 source is loaded from memory.
void MacroAssemblerMIPS64Compat::convertInt32ToDouble(const Address& src,
                                                      FloatRegister dest) {
  ma_ls(dest, src);  // load the 32-bit source into the FPU register
  as_cvtdw(dest, dest);
}
     52 
// As above, but the int32 source is addressed as base + scaled index.
void MacroAssemblerMIPS64Compat::convertInt32ToDouble(const BaseIndex& src,
                                                      FloatRegister dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  // Fold base + index*scale into scratch, then reuse the Address overload.
  computeScaledAddress(src, scratch);
  convertInt32ToDouble(Address(scratch, src.offset), dest);
}
     60 
// Convert an unsigned 32-bit integer to a double. Large uint32 values do not
// fit in int32, so zero-extend to 64 bits and go through the int64 -> double
// conversion instead.
void MacroAssemblerMIPS64Compat::convertUInt32ToDouble(Register src,
                                                       FloatRegister dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  ma_dext(scratch, src, Imm32(0), Imm32(32));  // zero-extend low 32 bits
  asMasm().convertInt64ToDouble(Register64(scratch), dest);
}
     68 
// Convert an unsigned 64-bit integer to a double. Values with the sign bit
// clear convert directly via the signed int64 path. Values with the sign bit
// set are halved first (so they fit in a signed int64), converted, then
// doubled; OR-ing the discarded low bit back into the halved value keeps
// the final rounding correct ("round to odd").
void MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register src,
                                                       FloatRegister dest) {
  Label positive, done;
  ma_b(src, src, &positive, NotSigned, ShortJump);

  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
  MOZ_ASSERT(src != scratch);
  MOZ_ASSERT(src != scratch2);

  ma_and(scratch, src, Imm32(1));    // keep the low bit that the shift drops
  ma_dsrl(scratch2, src, Imm32(1));  // src / 2, now non-negative
  ma_or(scratch, scratch2);
  as_dmtc1(scratch, dest);
  as_cvtdl(dest, dest);
  asMasm().addDouble(dest, dest);  // dest *= 2 to undo the halving
  ma_b(&done, ShortJump);

  bind(&positive);
  // Sign bit clear: directly representable as a signed int64.
  as_dmtc1(src, dest);
  as_cvtdl(dest, dest);

  bind(&done);
}
     94 
// Convert an unsigned 32-bit integer to a float32 via zero-extension and the
// int64 -> float32 conversion (same approach as convertUInt32ToDouble).
void MacroAssemblerMIPS64Compat::convertUInt32ToFloat32(Register src,
                                                        FloatRegister dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  ma_dext(scratch, src, Imm32(0), Imm32(32));  // zero-extend low 32 bits
  asMasm().convertInt64ToFloat32(Register64(scratch), dest);
}

// Narrow a double to float32 (cvt.s.d).
void MacroAssemblerMIPS64Compat::convertDoubleToFloat32(FloatRegister src,
                                                        FloatRegister dest) {
  as_cvtsd(dest, src);
}
    107 
// FCSR "cause" field bits used by the conversions below: CauseI (inexact)
// through CauseV (invalid operation). CauseIOrVMask selects just the I and V
// bits once the cause field has been shifted down so CauseI sits at bit 0.
const int CauseBitPos = int(Assembler::CauseI);
const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
                           (1 << int(Assembler::CauseV))) >>
                          int(Assembler::CauseI);
    113 
// Checks whether a double is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerMIPS64Compat::convertDoubleToInt32(FloatRegister src,
                                                      Register dest,
                                                      Label* fail,
                                                      bool negativeZeroCheck) {
  UseScratchRegisterScope temps(*this);
  if (negativeZeroCheck) {
    // -0.0 truncates to 0, silently losing the sign. Detect it by rotating
    // the raw bits left by one: -0.0 (only the sign bit set) becomes 1.
    moveFromDouble(src, dest);
    ma_drol(dest, dest, Imm32(1));
    ma_b(dest, Imm32(1), fail, Assembler::Equal);
  }

  Register scratch = temps.Acquire();
  // Truncate double to int ; if result is inexact or invalid fail.
  as_truncwd(ScratchFloat32Reg, src);
  as_cfc1(scratch, Assembler::FCSR);  // read FP status to inspect cause bits
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(scratch, scratch, CauseBitPos, CauseBitCount);
  as_andi(scratch, scratch,
          CauseIOrVMask);  // masking for Inexact and Invalid flag.
  ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
}
    138 
// Truncate a double to a pointer-sized (64-bit) integer, branching to |fail|
// if the conversion is inexact or invalid per the FCSR cause bits, or if the
// input is -0.0 and negativeZeroCheck is requested.
void MacroAssemblerMIPS64Compat::convertDoubleToPtr(FloatRegister src,
                                                    Register dest, Label* fail,
                                                    bool negativeZeroCheck) {
  UseScratchRegisterScope temps(*this);
  if (negativeZeroCheck) {
    // -0.0 has only the sign bit set; rotated left by one it equals 1.
    moveFromDouble(src, dest);
    ma_drol(dest, dest, Imm32(1));
    ma_b(dest, Imm32(1), fail, Assembler::Equal);
  }

  Register scratch = temps.Acquire();
  as_truncld(ScratchDoubleReg, src);  // truncate toward zero to int64
  as_cfc1(scratch, Assembler::FCSR);
  moveFromDouble(ScratchDoubleReg, dest);
  ma_ext(scratch, scratch, CauseBitPos, CauseBitCount);
  as_andi(scratch, scratch, CauseIOrVMask);  // inexact or invalid -> fail
  ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
}
    157 
// Checks whether a float32 is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerMIPS64Compat::convertFloat32ToInt32(FloatRegister src,
                                                       Register dest,
                                                       Label* fail,
                                                       bool negativeZeroCheck) {
  UseScratchRegisterScope temps(*this);
  if (negativeZeroCheck) {
    // The bit pattern of -0.0f is 0x80000000, i.e. INT32_MIN.
    moveFromFloat32(src, dest);
    ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
  }

  Register scratch = temps.Acquire();
  as_truncws(ScratchFloat32Reg, src);  // truncate toward zero
  as_cfc1(scratch, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, dest);
  ma_ext(scratch, scratch, CauseBitPos, CauseBitCount);
  as_andi(scratch, scratch, CauseIOrVMask);  // inexact or invalid -> fail
  ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
}
    179 
// Widen a float32 to a double (cvt.d.s).
void MacroAssemblerMIPS64Compat::convertFloat32ToDouble(FloatRegister src,
                                                        FloatRegister dest) {
  as_cvtds(dest, src);
}

// Convert a signed int32 held in a GPR to a float32.
void MacroAssemblerMIPS64Compat::convertInt32ToFloat32(Register src,
                                                       FloatRegister dest) {
  as_mtc1(src, dest);    // move the int32 bits into the FPU register
  as_cvtsw(dest, dest);  // convert word -> single
}

// As above, but the int32 source is loaded from memory.
void MacroAssemblerMIPS64Compat::convertInt32ToFloat32(const Address& src,
                                                       FloatRegister dest) {
  ma_ls(dest, src);
  as_cvtsw(dest, dest);
}

// Convert a pointer-sized (64-bit) signed integer to a double.
void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
  convertInt64ToDouble(Register64(src), dest);
}
    200 
// Load the address bound to |label| into |dest| using a fixed-size patchable
// immediate sequence; the real address is patched in when the label is bound.
void MacroAssemblerMIPS64::ma_li(Register dest, CodeLabel* label) {
  BufferOffset bo = m_buffer.nextOffset();
  ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
  label->patchAt()->bind(bo.getOffset());
  label->setLinkMode(CodeLabel::MoveImmediate);
}
    207 
// Load a 64-bit immediate using the shortest instruction sequence available,
// dispatching on how many significant (non-sign-extension) bits it has.
void MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm) {
  int64_t value = imm.value;

  // Fits in a signed 16-bit immediate: one addiu.
  if (-1 == (value >> 15) || 0 == (value >> 15)) {
    as_addiu(dest, zero, value);
    return;
  }
  // Fits in an unsigned 16-bit immediate: one ori.
  if (0 == (value >> 16)) {
    as_ori(dest, zero, value);
    return;
  }

  if (-1 == (value >> 31) || 0 == (value >> 31)) {
    // 32-bit signed value: lui materializes bits 31..16 (sign-extended).
    as_lui(dest, uint16_t(value >> 16));
  } else if (0 == (value >> 32)) {
    // 32-bit unsigned value: lui then clear the sign-extended upper half.
    as_lui(dest, uint16_t(value >> 16));
    // mips spec recommends dinsu but it is unfriendly to mips3
    ma_dext(dest, dest, Imm32(0), Imm32(32));
  } else if (-1 == (value >> 47) || 0 == (value >> 47)) {
    // 48-bit signed value: build bits 47..16, then shift left by 16.
    as_lui(dest, uint16_t(value >> 32));
    if (uint16_t(value >> 16)) {
      as_ori(dest, dest, uint16_t(value >> 16));
    }
    as_dsll(dest, dest, 16);
  } else if (0 == (value >> 48)) {
    // 48-bit unsigned value: as above, clearing the sign extension first.
    as_lui(dest, uint16_t(value >> 32));
    // mips spec recommends dinsu but it is unfriendly to mips3
    ma_dext(dest, dest, Imm32(0), Imm32(32));
    if (uint16_t(value >> 16)) {
      as_ori(dest, dest, uint16_t(value >> 16));
    }
    as_dsll(dest, dest, 16);
  } else {
    // Full 64-bit value. Fast path: a small constant shifted left (common
    // for aligned addresses). The earlier cases guarantee the significant
    // bits reach past bit 47 while fitting in 16 bits, so trailingZeroes is
    // always >= 32 here and the dsll32 encoding applies.
    uint32_t trailingZeroes = mozilla::CountTrailingZeroes64(value);
    if (Imm16::IsInSignedRange(value >> trailingZeroes)) {
      as_addiu(dest, zero, int32_t(value >> trailingZeroes));
      as_dsll32(dest, dest, trailingZeroes);
      return;
    }

    // General case: assemble 16 bits at a time starting from the top.
    as_lui(dest, uint16_t(value >> 48));
    if (uint16_t(value >> 32)) {
      as_ori(dest, dest, uint16_t(value >> 32));
    }
    if (uint16_t(value >> 16)) {
      as_dsll(dest, dest, 16);
      as_ori(dest, dest, uint16_t(value >> 16));
      as_dsll(dest, dest, 16);
    } else {
      as_dsll32(dest, dest, 32);
    }
  }
  // Common tail: OR in the low 16 bits if any are set.
  if (uint16_t(value)) {
    as_ori(dest, dest, uint16_t(value));
  }
}
    264 
// This method generates lui, dsll and ori instruction block that can be
// modified by UpdateLoad64Value, either during compilation (eg.
// Assembler::bind), or during execution (eg. jit::PatchJump).
void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmPtr imm) {
  // Delegate to the ImmWord overload.
  return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
}
    271 
    272 void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmWord imm,
    273                                          LiFlags flags) {
    274  if (Li64 == flags) {
    275    m_buffer.ensureSpace(6 * sizeof(uint32_t));
    276    as_lui(dest, Imm16::Upper(Imm32((imm.value >> 32) + 0x8000)).encode());
    277    as_daddiu(dest, dest, int16_t((imm.value >> 32) & 0xffff));
    278    as_dsll(dest, dest, 16);
    279    as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
    280    as_dsll(dest, dest, 16);
    281    as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
    282  } else {
    283    m_buffer.ensureSpace(4 * sizeof(uint32_t));
    284    as_lui(dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
    285    as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
    286    as_dsll(dest, dest, 16);
    287    as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
    288  }
    289 }
    290 
// rd = -rs (64-bit two's-complement negation; dsubu never traps).
void MacroAssemblerMIPS64::ma_dnegu(Register rd, Register rs) {
  as_dsubu(rd, zero, rs);
}
    294 
    295 // Shifts
    296 void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Imm32 shift) {
    297  if (31 < shift.value) {
    298    as_dsll32(rd, rt, shift.value);
    299  } else {
    300    as_dsll(rd, rt, shift.value);
    301  }
    302 }
    303 
    304 void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Imm32 shift) {
    305  if (31 < shift.value) {
    306    as_dsrl32(rd, rt, shift.value);
    307  } else {
    308    as_dsrl(rd, rt, shift.value);
    309  }
    310 }
    311 
    312 void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Imm32 shift) {
    313  if (31 < shift.value) {
    314    as_dsra32(rd, rt, shift.value);
    315  } else {
    316    as_dsra(rd, rt, shift.value);
    317  }
    318 }
    319 
// Rotate right by a constant. Uses the R2 rotate instructions when available;
// otherwise composes the rotate from two shifts and an OR.
void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Imm32 shift) {
  if (hasR2()) {
    if (31 < shift.value) {
      as_drotr32(rd, rt, shift.value);
    } else {
      as_drotr(rd, rt, shift.value);
    }
  } else {
    // rd = (rt >> shift) | (rt << (64 - shift))
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_dsrl(scratch, rt, shift);
    ma_dsll(rd, rt, Imm32(64 - shift.value));
    as_or(rd, rd, scratch);
  }
}

// Rotate left by a constant, expressed as a right-rotate by (64 - shift).
// NOTE(review): assumes 0 < shift.value < 64; shift.value == 0 would request
// a rotate by 64 — confirm callers never pass 0.
void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Imm32 shift) {
  ma_dror(rd, rt, Imm32(64 - shift.value));
}
    339 
// Variable-amount 64-bit shifts; the hardware uses the low 6 bits of |shift|.
void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Register shift) {
  as_dsllv(rd, rt, shift);
}

void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Register shift) {
  as_dsrlv(rd, rt, shift);
}

void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Register shift) {
  as_dsrav(rd, rt, shift);
}
    351 
// Rotate right by a register amount.
void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Register shift) {
  if (hasR2()) {
    as_drotrv(rd, rt, shift);
  } else {
    // rd = (rt >> shift) | (rt << (64 - shift)); (64 - shift) is computed as
    // the negation of shift, which is congruent mod 64.
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    as_dsubu(scratch, zero, shift);
    as_dsllv(scratch, rt, scratch);
    as_dsrlv(rd, rt, shift);
    as_or(rd, rd, scratch);
  }
}
    364 
// Rotate left by a register amount: a right-rotate by (64 - shift) mod 64.
void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Register shift) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  as_dsubu(scratch, zero, shift);  // (64 - shift) mod 64
  if (hasR2()) {
    as_drotrv(rd, rt, scratch);
  } else {
    // rd = (rt << shift) | (rt >> (64 - shift))
    as_dsrlv(scratch, rt, scratch);
    as_dsllv(rd, rt, shift);
    as_or(rd, rd, scratch);
  }
}
    377 
// Insert the low |size| bits of rs into rt at bit position |pos| (dins
// semantics). With R2, pick the dins/dinsm/dinsu encoding matching the
// pos/size range; without R2, emulate with shifts and masks.
void MacroAssemblerMIPS64::ma_dins(Register rt, Register rs, Imm32 pos,
                                   Imm32 size) {
  if (hasR2()) {
    if (pos.value >= 0 && pos.value < 32) {
      if (pos.value + size.value > 32) {
        as_dinsm(rt, rs, pos.value, size.value);  // field crosses bit 32
      } else {
        as_dins(rt, rs, pos.value, size.value);
      }
    } else {
      as_dinsu(rt, rs, pos.value, size.value);  // field entirely above bit 31
    }
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();

    // optimize for special positions
    if (pos.value == 0) {
      // Field at bit 0: clear the low |size| bits of rt, OR the field in.
      ma_dext(scratch, rs, Imm32(0), size);
      ma_dsrl(rt, rt, size);
      ma_dsll(rt, rt, size);
      as_or(rt, rt, scratch);
    } else if (pos.value + size.value == 64) {
      // Field ends at bit 63: clear the top |size| bits of rt likewise.
      ma_dsll(scratch, rs, pos);
      ma_dsll(rt, rt, size);
      ma_dsrl(rt, rt, size);
      as_or(rt, rt, scratch);
    } else {
      // General case: build a |size|-bit mask, position mask and field,
      // clear the target field, then merge.
      Register scratch2 = temps.Acquire();
      ma_dsubu(scratch, zero, Imm32(1));  // all-ones
      ma_dsrl(scratch, scratch, Imm32(64 - size.value));  // |size| ones
      as_and(scratch2, rs, scratch);  // field bits of rs
      ma_dsll(scratch, scratch, pos);  // mask at position
      ma_dsll(scratch2, scratch2, pos);  // field at position
      as_nor(scratch, scratch, zero);  // invert the mask
      as_and(rt, rt, scratch);  // clear target field
      as_or(rt, rt, scratch2);  // insert
    }
  }
}
    418 
// Extract |size| bits of rs starting at bit |pos| into rt, zero-extended
// (dext semantics). Without R2, emulate by shifting the field up to the top
// of the register and then logically shifting it back down.
void MacroAssemblerMIPS64::ma_dext(Register rt, Register rs, Imm32 pos,
                                   Imm32 size) {
  if (hasR2()) {
    if (pos.value >= 0 && pos.value < 32) {
      if (size.value > 32) {
        as_dextm(rt, rs, pos.value, size.value);  // field wider than 32 bits
      } else {
        as_dext(rt, rs, pos.value, size.value);
      }
    } else {
      as_dextu(rt, rs, pos.value, size.value);  // field starts above bit 31
    }
  } else {
    ma_dsll(rt, rs, Imm32(64 - pos.value - size.value));
    ma_dsrl(rt, rt, Imm32(64 - size.value));
  }
}
    436 
// Swap the bytes within each halfword of rt (dsbh).
void MacroAssemblerMIPS64::ma_dsbh(Register rd, Register rt) {
  as_dsbh(rd, rt);
}

// Swap the halfwords of rt (dshd); combined with dsbh this byte-swaps a
// doubleword.
void MacroAssemblerMIPS64::ma_dshd(Register rd, Register rt) {
  as_dshd(rd, rt);
}
    444 
    445 void MacroAssemblerMIPS64::ma_dctz(Register rd, Register rs) {
    446  UseScratchRegisterScope temps(*this);
    447  Register scratch = temps.Acquire();
    448  Register scratch2 = temps.Acquire();
    449  ma_dnegu(scratch, rs);
    450  as_and(rd, scratch, rs);
    451  as_dclz(rd, rd);
    452  ma_dnegu(scratch2, rd);
    453  ma_daddu(scratch2, Imm32(0x3f));
    454 #ifdef MIPS64
    455  as_selnez(scratch2, scratch2, scratch);
    456  as_seleqz(rd, rd, scratch);
    457  as_or(rd, rd, scratch2);
    458 #else
    459  as_movn(rd, scratch2, scratch);
    460 #endif
    461 }
    462 
// Arithmetic-based ops.

// Add.
// rd = rs + imm. Uses a single daddiu when the immediate fits in signed
// 16 bits; otherwise materializes the immediate in a scratch register.
void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInSignedRange(imm.value)) {
    as_daddiu(rd, rs, imm.value);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    as_daddu(rd, rs, scratch);
  }
}

// As above, for a 64-bit immediate.
void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs, ImmWord imm) {
  if (Imm16::IsInSignedRange(int64_t(imm.value))) {
    as_daddiu(rd, rs, imm.value);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    as_daddu(rd, rs, scratch);
  }
}

// rd += rs.
void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs) {
  as_daddu(rd, rd, rs);
}

// rd += imm.
void MacroAssemblerMIPS64::ma_daddu(Register rd, Imm32 imm) {
  ma_daddu(rd, rd, imm);
}
    495 
// 32-bit add that branches to |overflow| when the result does not fit in
// int32: the full 64-bit sum differs from the sign-extended 32-bit sum
// exactly when 32-bit overflow occurred.
void MacroAssemblerMIPS64::ma_add32TestOverflow(Register rd, Register rs,
                                                Register rt, Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  as_daddu(scratch2, rs, rt);  // full 64-bit sum
  as_addu(rd, rs, rt);         // 32-bit sum, sign-extended
  ma_b(rd, scratch2, overflow, Assembler::NotEqual);
}
    504 
// Immediate variant of the overflow-checked 32-bit add.
void MacroAssemblerMIPS64::ma_add32TestOverflow(Register rd, Register rs,
                                                Imm32 imm, Label* overflow) {
  // Check for signed range because of as_daddiu
  UseScratchRegisterScope temps(*this);
  if (Imm16::IsInSignedRange(imm.value)) {
    Register scratch2 = temps.Acquire();
    as_daddiu(scratch2, rs, imm.value);  // full 64-bit sum
    as_addiu(rd, rs, imm.value);         // sign-extended 32-bit sum
    ma_b(rd, scratch2, overflow, Assembler::NotEqual);
  } else {
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    ma_add32TestOverflow(rd, rs, scratch, overflow);
  }
}
    520 
// 64-bit add with signed-overflow check. Signed overflow occurs iff both
// operands have the same sign and the result's sign differs; scratch2 is
// computed so its sign bit is set exactly in that case.
void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
                                                 Register rt, Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  MOZ_ASSERT_IF(rd == rs, rs != rt);
  MOZ_ASSERT(rd != scratch2);

  if (rs == rt) {
    // rd = 2*rs: overflow iff rd's sign differs from rs's sign.
    as_daddu(rd, rs, rs);
    as_xor(scratch2, rs, rd);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    MOZ_ASSERT(rs != scratch2);
    MOZ_ASSERT(rt != scratch2);

    // If the sign of rs and rt are different, no overflow
    as_xor(scratch2, rs, rt);
    as_nor(scratch2, scratch2, zero);

    // handle rd == rt
    ma_move(scratch, rt);
    as_daddu(rd, rs, rt);
    as_xor(scratch, rd, scratch);  // did the sign change vs. rt?
    as_and(scratch2, scratch, scratch2);
  }

  ma_b(scratch2, zero, overflow, Assembler::LessThan);
}
    550 
// Imm32 variant: delegate to the ImmWord overload (the int32 value is
// sign-extended to 64 bits by the ImmWord conversion).
void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
                                                 Imm32 imm, Label* overflow) {
  ma_addPtrTestOverflow(rd, rs, ImmWord(imm.value), overflow);
}
    555 
// ImmWord variant of the overflow-checked 64-bit add; same sign test as the
// register form, with the immediate materialized in a scratch register.
void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
                                                 ImmWord imm, Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();

    // If the sign of rs and rt are different, no overflow
    ma_li(scratch, imm);
    as_xor(scratch2, rs, scratch);
    as_nor(scratch2, scratch2, zero);  // sign bit set iff same signs

    as_daddu(rd, rs, scratch);
    as_xor(scratch, rd, scratch);  // did the result's sign change?
    as_and(scratch2, scratch, scratch2);
  }
  ma_b(scratch2, zero, overflow, Assembler::LessThan);
}
    575 
// 64-bit add with unsigned-carry check: a carry out occurred iff the result
// is (unsigned) smaller than an addend. Branches when the carry state
// matches |cond| (CarrySet / CarryClear).
void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
                                              Register rs, Register rt,
                                              Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  as_daddu(rd, rs, rt);
  as_sltu(scratch2, rd, rt);  // 1 iff carry out
  ma_b(scratch2, scratch2, overflow,
       cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
}
    586 
// Immediate variants of the carry-checked add: use daddiu/sltiu directly
// when the immediate fits in signed 16 bits, otherwise materialize it and
// use the register form.
void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
                                              Register rs, Imm32 imm,
                                              Label* overflow) {
  // Check for signed range because of as_daddiu
  UseScratchRegisterScope temps(*this);
  if (Imm16::IsInSignedRange(imm.value)) {
    Register scratch2 = temps.Acquire();
    as_daddiu(rd, rs, imm.value);
    as_sltiu(scratch2, rd, imm.value);  // 1 iff carry out
    ma_b(scratch2, scratch2, overflow,
         cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
  } else {
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    ma_addPtrTestCarry(cond, rd, rs, scratch, overflow);
  }
}

// As above, for a 64-bit immediate.
void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
                                              Register rs, ImmWord imm,
                                              Label* overflow) {
  // Check for signed range because of as_daddiu
  UseScratchRegisterScope temps(*this);
  if (Imm16::IsInSignedRange(int64_t(imm.value))) {
    Register scratch2 = temps.Acquire();
    as_daddiu(rd, rs, imm.value);
    as_sltiu(scratch2, rd, imm.value);  // 1 iff carry out
    ma_b(scratch2, scratch2, overflow,
         cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
  } else {
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    ma_addPtrTestCarry(cond, rd, rs, scratch, overflow);
  }
}
    622 
// Add and branch to |taken| on the sign of the result (Signed = negative,
// NotSigned = non-negative).
void MacroAssemblerMIPS64::ma_addPtrTestSigned(Condition cond, Register rd,
                                               Register rj, Register rk,
                                               Label* taken) {
  MOZ_ASSERT(cond == Assembler::Signed || cond == Assembler::NotSigned);

  as_daddu(rd, rj, rk);
  ma_b(rd, rd, taken, cond);
}

// As above with a 32-bit immediate addend.
void MacroAssemblerMIPS64::ma_addPtrTestSigned(Condition cond, Register rd,
                                               Register rj, Imm32 imm,
                                               Label* taken) {
  MOZ_ASSERT(cond == Assembler::Signed || cond == Assembler::NotSigned);

  ma_daddu(rd, rj, imm);
  ma_b(rd, rd, taken, cond);
}

// As above with a 64-bit immediate addend, materialized first.
void MacroAssemblerMIPS64::ma_addPtrTestSigned(Condition cond, Register rd,
                                               Register rj, ImmWord imm,
                                               Label* taken) {
  MOZ_ASSERT(cond == Assembler::Signed || cond == Assembler::NotSigned);

  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  ma_li(scratch2, imm);
  ma_addPtrTestSigned(cond, rd, rj, scratch2, taken);
}
    651 
// Subtract.
// rd = rs - imm, implemented as daddiu with the negated immediate when that
// fits in signed 16 bits.
// NOTE(review): imm.value == INT32_MIN would overflow the negation;
// presumably callers never pass it — confirm.
void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, Imm32 imm) {
  if (Imm16::IsInSignedRange(-imm.value)) {
    as_daddiu(rd, rs, -imm.value);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    as_dsubu(rd, rs, scratch);
  }
}

// As above, for a 64-bit immediate.
void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, ImmWord imm) {
  if (Imm16::IsInSignedRange(int64_t(-imm.value))) {
    as_daddiu(rd, rs, -imm.value);
  } else {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    as_dsubu(rd, rs, scratch);
  }
}

// rd -= rs.
void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs) {
  as_dsubu(rd, rd, rs);
}

// rd -= imm.
void MacroAssemblerMIPS64::ma_dsubu(Register rd, Imm32 imm) {
  ma_dsubu(rd, rd, imm);
}
    682 
// 32-bit subtract with overflow check: compare the full 64-bit difference
// against the sign-extended 32-bit difference (same idea as
// ma_add32TestOverflow).
void MacroAssemblerMIPS64::ma_sub32TestOverflow(Register rd, Register rs,
                                                Register rt, Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  as_dsubu(scratch2, rs, rt);  // full 64-bit difference
  as_subu(rd, rs, rt);         // 32-bit difference, sign-extended
  ma_b(rd, scratch2, overflow, Assembler::NotEqual);
}
    691 
// 64-bit subtract with signed-overflow check. Signed overflow occurs iff rs
// and rt have different signs and the result's sign differs from rs's sign.
void MacroAssemblerMIPS64::ma_subPtrTestOverflow(Register rd, Register rs,
                                                 Register rt, Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  MOZ_ASSERT_IF(rs == rd, rs != rt);
  MOZ_ASSERT(rs != scratch2);
  MOZ_ASSERT(rt != scratch2);
  MOZ_ASSERT(rd != scratch2);

  Register rs_copy = rs;

  if (rs == rd) {
    // Writing rd clobbers rs; keep the original value for the sign tests.
    ma_move(scratch2, rs);
    rs_copy = scratch2;
  }

  {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    MOZ_ASSERT(rd != scratch);

    ma_move(scratch, rt);  // preserve rt in case rd == rt
    as_dsubu(rd, rs, rt);
    // If the sign of rs and rt are the same, no overflow
    as_xor(scratch, rs_copy, scratch);
    // Check if the sign of rd and rs are the same
    as_xor(scratch2, rd, rs_copy);
    as_and(scratch2, scratch2, scratch);
  }

  ma_b(scratch2, zero, overflow, Assembler::LessThan);
}
    724 
// Immediate variant of the overflow-checked 64-bit subtract; same sign test
// as the register form with the immediate materialized in a scratch.
void MacroAssemblerMIPS64::ma_subPtrTestOverflow(Register rd, Register rs,
                                                 Imm32 imm, Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  MOZ_ASSERT(rs != scratch2);
  MOZ_ASSERT(rd != scratch2);

  Register rs_copy = rs;

  if (rs == rd) {
    // Writing rd clobbers rs; keep the original value for the sign tests.
    ma_move(scratch2, rs);
    rs_copy = scratch2;
  }

  {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    MOZ_ASSERT(rd != scratch);

    ma_li(scratch, imm);
    as_dsubu(rd, rs, scratch);
    // If the sign of rs and rt are the same, no overflow
    as_xor(scratch, rs_copy, scratch);
    // Check if the sign of rd and rs are the same
    as_xor(scratch2, rd, rs_copy);
    as_and(scratch2, scratch2, scratch);
  }

  ma_b(scratch2, zero, overflow, Assembler::LessThan);
}
    755 
// Unsigned 64-bit multiply, low 64 bits of the product. R6 has a
// three-operand dmulu; pre-R6 multiplies into HI/LO and reads LO.
void MacroAssemblerMIPS64::ma_dmulu(Register rd, Register rs, Register rt) {
#ifdef MIPSR6
  as_dmulu(rd, rs, rt);
#else
  as_dmultu(rs, rt);
  as_mflo(rd);
#endif
}

// As above, with an immediate multiplier materialized in a scratch register.
void MacroAssemblerMIPS64::ma_dmulu(Register rd, Register rs, ImmWord imm) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  ma_li(scratch, imm);
  ma_dmulu(rd, rs, scratch);
}
    771 
// Signed 64-bit multiply with overflow check: the product overflows iff the
// high 64 bits are not the sign-extension of the low 64 bits.
void MacroAssemblerMIPS64::ma_mulPtrTestOverflow(Register rd, Register rs,
                                                 Register rt, Label* overflow) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
#ifdef MIPSR6
  if (rd == rs) {
    // dmul writes rd first; keep a copy of rs for the dmuh below.
    ma_move(scratch2, rs);
    rs = scratch2;
  }
  as_dmul(rd, rs, rt);     // low 64 bits
  as_dmuh(scratch2, rs, rt);  // high 64 bits
#else
  as_dmult(rs, rt);
  as_mflo(rd);        // low 64 bits
  as_mfhi(scratch2);  // high 64 bits
#endif
  as_dsra32(scratch, rd, 63);  // 0 or -1: sign-extension of the low half
  ma_b(scratch, scratch2, overflow, Assembler::NotEqual);
}
    792 
// Memory.
// Load |size| bytes from |address| into |dest|, applying |extension|.
// Returns the offset of the memory instruction itself so the caller can
// record it as a potentially-faulting access.
FaultingCodeOffset MacroAssemblerMIPS64::ma_load(Register dest, Address address,
                                                LoadStoreSize size,
                                                LoadStoreExtension extension) {
 UseScratchRegisterScope temps(*this);
 int16_t encodedOffset;
 Register base;
 FaultingCodeOffset fco;

 // Loongson has indexed load forms (gslbx & co.) that handle offsets
 // outside the signed-16 range without an extra add. They are only used
 // for sign-extending loads, hence the ZeroExtend exclusion.
 if (isLoongson() && ZeroExtend != extension &&
     !Imm16::IsInSignedRange(address.offset)) {
   UseScratchRegisterScope temps(*this);
   Register scratch = temps.Acquire();
   ma_li(scratch, Imm32(address.offset));
   base = address.base;

   fco = FaultingCodeOffset(currentOffset());
   switch (size) {
     case SizeByte:
       as_gslbx(dest, base, scratch, 0);
       break;
     case SizeHalfWord:
       as_gslhx(dest, base, scratch, 0);
       break;
     case SizeWord:
       as_gslwx(dest, base, scratch, 0);
       break;
     case SizeDouble:
       as_gsldx(dest, base, scratch, 0);
       break;
     default:
       MOZ_CRASH("Invalid argument for ma_load");
   }
   return fco;
 }

 // Offsets that do not fit the 16-bit signed displacement field are folded
 // into a scratch base register; in-range offsets are encoded directly.
 if (!Imm16::IsInSignedRange(address.offset)) {
   Register scratch = temps.Acquire();
   ma_li(scratch, Imm32(address.offset));
   as_daddu(scratch, address.base, scratch);
   base = scratch;
   encodedOffset = Imm16(0).encode();
 } else {
   encodedOffset = Imm16(address.offset).encode();
   base = address.base;
 }

 fco = FaultingCodeOffset(currentOffset());
 switch (size) {
   case SizeByte:
     if (ZeroExtend == extension) {
       as_lbu(dest, base, encodedOffset);
     } else {
       as_lb(dest, base, encodedOffset);
     }
     break;
   case SizeHalfWord:
     if (ZeroExtend == extension) {
       as_lhu(dest, base, encodedOffset);
     } else {
       as_lh(dest, base, encodedOffset);
     }
     break;
   case SizeWord:
     if (ZeroExtend == extension) {
       as_lwu(dest, base, encodedOffset);
     } else {
       as_lw(dest, base, encodedOffset);
     }
     break;
   case SizeDouble:
     // Full 64-bit loads have no extension variants.
     as_ld(dest, base, encodedOffset);
     break;
   default:
     MOZ_CRASH("Invalid argument for ma_load");
 }
 return fco;
}
    871 
// Store the immediate |imm| (|size| bytes) at the base+index address |dest|.
void MacroAssemblerMIPS64::ma_store(ImmWord imm, const BaseIndex& dest,
                                   LoadStoreSize size,
                                   LoadStoreExtension extension) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();

 // Make sure that scratch2 contains absolute address so that
 // offset is 0.
 asMasm().computeEffectiveAddress(dest, scratch2);
 // Scratch register is free now, use it for loading imm value
 Register scratch = temps.Acquire();
 ma_li(scratch, ImmWord(imm.value));

 // With offset=0 scratch will not be needed inside ma_store(), so it is
 // safe to pass it as the data register here.
 ma_store(scratch, Address(scratch2, 0), size, extension);
}
    889 
// Store the immediate |imm| (|size| bytes) at |address|.
void MacroAssemblerMIPS64::ma_store(ImmWord imm, Address address,
                                   LoadStoreSize size,
                                   LoadStoreExtension extension) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();
 // Materialize the value and delegate to the register form.
 ma_li(scratch2, imm);
 ma_store(scratch2, address, size, extension);
}
    898 
// Store |size| bytes from |data| to |address|. Returns the offset of the
// store instruction itself so the caller can record it as a
// potentially-faulting access. |extension| is unused for stores.
FaultingCodeOffset MacroAssemblerMIPS64::ma_store(
   Register data, Address address, LoadStoreSize size,
   LoadStoreExtension extension) {
 UseScratchRegisterScope temps(*this);
 int16_t encodedOffset;
 Register base;
 FaultingCodeOffset fco;

 // Loongson's indexed stores handle offsets outside the signed-16 range
 // without an extra address computation.
 if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
   UseScratchRegisterScope temps(*this);
   Register scratch = temps.Acquire();
   ma_li(scratch, Imm32(address.offset));
   base = address.base;

   fco = FaultingCodeOffset(currentOffset());
   switch (size) {
     case SizeByte:
       as_gssbx(data, base, scratch, 0);
       break;
     case SizeHalfWord:
       as_gsshx(data, base, scratch, 0);
       break;
     case SizeWord:
       as_gsswx(data, base, scratch, 0);
       break;
     case SizeDouble:
       as_gssdx(data, base, scratch, 0);
       break;
     default:
       MOZ_CRASH("Invalid argument for ma_store");
   }
   return fco;
 }

 // Fold an out-of-range offset into a scratch base register; in-range
 // offsets are encoded directly in the instruction.
 if (!Imm16::IsInSignedRange(address.offset)) {
   // assert on scratch ownership
   Register scratch = temps.Acquire();
   ma_li(scratch, Imm32(address.offset));
   as_daddu(scratch, address.base, scratch);
   base = scratch;
   encodedOffset = Imm16(0).encode();
 } else {
   encodedOffset = Imm16(address.offset).encode();
   base = address.base;
 }

 fco = FaultingCodeOffset(currentOffset());
 switch (size) {
   case SizeByte:
     as_sb(data, base, encodedOffset);
     break;
   case SizeHalfWord:
     as_sh(data, base, encodedOffset);
     break;
   case SizeWord:
     as_sw(data, base, encodedOffset);
     break;
   case SizeDouble:
     as_sd(data, base, encodedOffset);
     break;
   default:
     MOZ_CRASH("Invalid argument for ma_store");
 }
 return fco;
}
    964 
// Compute dest = base + (index << scale) using 32-bit add/shift forms, so
// the result is the (sign-extended) 32-bit scaled address.
void MacroAssemblerMIPS64Compat::computeScaledAddress32(
   const BaseIndex& address, Register dest) {
 Register base = address.base;
 Register index = address.index;
 int32_t shift = Imm32::ShiftOf(address.scale).value;
 if (shift && base == zero) {
   // Base is the zero register: the shifted index is the whole address.
   MOZ_ASSERT(shift <= 4);
   ma_sll(dest, index, Imm32(shift));
 } else if (shift) {
   UseScratchRegisterScope temps(*this);
   Register tmp = temps.Acquire();
   MOZ_ASSERT(shift <= 4);
   ma_sll(tmp, index, Imm32(shift));
   as_addu(dest, base, tmp);
 } else {
   // Scale of 1: plain add, no shift needed.
   as_addu(dest, base, index);
 }
}
    983 
// Compute dest = base + (index << scale) with full 64-bit arithmetic.
void MacroAssemblerMIPS64Compat::computeScaledAddress(const BaseIndex& address,
                                                     Register dest) {
 int32_t shift = Imm32::ShiftOf(address.scale).value;
 if (shift) {
   UseScratchRegisterScope temps(*this);
   Register scratch = temps.Acquire();
   ma_dsll(scratch, address.index, Imm32(shift));
   as_daddu(dest, address.base, scratch);
 } else {
   // Scale of 1: plain add, no shift needed.
   as_daddu(dest, address.base, address.index);
 }
}
    996 
// Compute dest = base + (index << scale) + offset.
void MacroAssemblerMIPS64Compat::computeEffectiveAddress(
   const BaseIndex& address, Register dest) {
 computeScaledAddress(address, dest);
 if (address.offset) {
   asMasm().addPtr(Imm32(address.offset), dest);
 }
}
   1004 
// Pop one pointer-sized (64-bit) word off the stack into |r|.
// (The old "32 bits of data" comment was stale, carried over from the
// MIPS32 port: this uses ld and an 8-byte stack adjustment.)
void MacroAssemblerMIPS64::ma_pop(Register r) {
 as_ld(r, StackPointer, 0);
 as_daddiu(StackPointer, StackPointer, sizeof(intptr_t));
}
   1010 
// Push |r| onto the stack in a pointer-sized slot.
void MacroAssemblerMIPS64::ma_push(Register r) {
 UseScratchRegisterScope temps(*this);
 if (r == sp) {
   Register scratch = temps.Acquire();
   // Pushing sp requires one more instruction: copy the pre-decrement
   // value so the word stored is the old stack pointer.
   ma_move(scratch, sp);
   r = scratch;
 }

 as_daddiu(StackPointer, StackPointer, -int32_t(sizeof(intptr_t)));
 as_sd(r, StackPointer, 0);
}
   1023 
// Branches when done from within mips-specific code.
// Compare |lhs| with the 64-bit immediate |imm| and branch to |label|.
void MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label,
                               Condition c, JumpKind jumpKind) {
 if (imm.value <= INT32_MAX) {
   // Small enough for the Imm32 overload, which can use immediate forms.
   ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
 } else {
   UseScratchRegisterScope temps(*this);
   Register scratch = temps.Acquire();
   ma_li(scratch, imm);
   ma_b(lhs, scratch, label, c, jumpKind);
 }
}
   1036 
// Compare |lhs| with the 64-bit value loaded from |addr| and branch.
void MacroAssemblerMIPS64::ma_b(Register lhs, Address addr, Label* label,
                               Condition c, JumpKind jumpKind) {
 UseScratchRegisterScope temps(*this);
 Register scratch = temps.Acquire();
 ma_load(scratch, addr, SizeDouble);
 ma_b(lhs, scratch, label, c, jumpKind);
}
   1044 
// Compare the 64-bit value loaded from |addr| with |imm| and branch.
void MacroAssemblerMIPS64::ma_b(Address addr, Imm32 imm, Label* label,
                               Condition c, JumpKind jumpKind) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();
 ma_load(scratch2, addr, SizeDouble);
 ma_b(scratch2, imm, label, c, jumpKind);
}
   1052 
// Compare the 64-bit value loaded from |addr| with a GC pointer and branch.
void MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label,
                               Condition c, JumpKind jumpKind) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();
 ma_load(scratch2, addr, SizeDouble);
 ma_b(scratch2, imm, label, c, jumpKind);
}
   1060 
// Branch-and-link (call) to |label|. The return address points just past
// the reserved instruction block.
void MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill) {
 spew("branch .Llabel %p\n", label);
 if (label->bound()) {
   UseScratchRegisterScope temps(*this);
   // Generate the long jump for calls because return address has to be
   // the address after the reserved block.
   addLongJump(nextOffset(), BufferOffset(label->offset()));
   Register scratch = temps.Acquire();
   ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
   as_jalr(scratch);
   if (delaySlotFill == FillDelaySlot) {
     as_nop();
   }
   return;
 }

 // Label not yet bound: emit an open branch threaded onto the label's
 // use chain, patched later when the label is bound.
 // Second word holds a pointer to the next branch in label's chain.
 uint32_t nextInChain =
     label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

 // Make the whole branch continuous in the buffer. The '6'
 // instructions are writing at below (contain delay slot).
 m_buffer.ensureSpace(6 * sizeof(uint32_t));

 spew("bal .Llabel %p\n", label);
 BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
 writeInst(nextInChain);
 if (!oom()) {
   label->use(bo.getOffset());
 }
 // Leave space for long jump.
 as_nop();
 as_nop();
 as_nop();
 if (delaySlotFill == FillDelaySlot) {
   as_nop();
 }
}
   1099 
// Emit a (possibly conditional) branch encoded in |code| targeting |label|.
// Short branches use the 16-bit displacement field; long branches become a
// patchable load-address + jr sequence. |branchCodeScratch|, when valid,
// supplies the register used for the long-jump target address.
void MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label,
                                         JumpKind jumpKind,
                                         Register branchCodeScratch) {
 // simply output the pointer of one label as its id,
 // notice that after one label destructor, the pointer will be reused.
 spew("branch .Llabel %p", label);
 MOZ_ASSERT(code.encode() !=
            InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
 InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

 if (label->bound()) {
   int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();

   // Prefer the compact form whenever the displacement fits.
   if (BOffImm16::IsInRange(offset)) {
     jumpKind = ShortJump;
   }

   if (jumpKind == ShortJump) {
     MOZ_ASSERT(BOffImm16::IsInRange(offset));
     code.setBOffImm16(BOffImm16(offset));
#ifdef JS_JITSPEW
     decodeBranchInstAndSpew(code);
#endif
     writeInst(code.encode());
     as_nop();  // branch delay slot
     return;
   }

   // Unconditional long branch: |code| is beq $zero, $zero.
   if (code.encode() == inst_beq.encode()) {
     UseScratchRegisterScope temps(*this);
     // Handle long jump
     addLongJump(nextOffset(), BufferOffset(label->offset()));
     Register scratch = branchCodeScratch;
     if (scratch == InvalidReg) {
       // Request a new scratch register if |branchCodeScratch| is invalid.
       // NB: |branchCodeScratch| must not be used before encoding |code|.
       scratch = temps.Acquire();
     }
     ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
     as_jr(scratch);
     as_nop();
     return;
   }

   // Handle long conditional branch, the target offset is based on self,
   // point to next instruction of nop at below.
   spew("invert branch .Llabel %p", label);
   InstImm code_r = invertBranch(code, BOffImm16(7 * sizeof(uint32_t)));
#ifdef JS_JITSPEW
   decodeBranchInstAndSpew(code_r);
#endif
   writeInst(code_r.encode());
   UseScratchRegisterScope temps(*this);
   // No need for a "nop" here because we can clobber scratch.
   addLongJump(nextOffset(), BufferOffset(label->offset()));
   Register scratch = branchCodeScratch;
   if (scratch == InvalidReg) {
     // Request a new scratch register if |branchCodeScratch| is invalid.
     // NB: |branchCodeScratch| must not be used before encoding |code|.
     scratch = temps.Acquire();
   }
   ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
   as_jr(scratch);
   as_nop();
   return;
 }

 // Generate open jump and link it to a label.

 // Second word holds a pointer to the next branch in label's chain.
 uint32_t nextInChain =
     label->used() ? label->offset() : LabelBase::INVALID_OFFSET;

 if (jumpKind == ShortJump) {
   // Make the whole branch continuous in the buffer.
   m_buffer.ensureSpace(2 * sizeof(uint32_t));

   // Indicate that this is short jump with offset 4.
   code.setBOffImm16(BOffImm16(4));
#ifdef JS_JITSPEW
   decodeBranchInstAndSpew(code);
#endif
   BufferOffset bo = writeInst(code.encode());
   writeInst(nextInChain);
   if (!oom()) {
     label->use(bo.getOffset());
   }
   return;
 }

 bool conditional = code.encode() != inst_beq.encode();

 // Make the whole branch continuous in the buffer. The '7'
 // instructions are writing at below (contain conditional nop).
 m_buffer.ensureSpace(7 * sizeof(uint32_t));

#ifdef JS_JITSPEW
 decodeBranchInstAndSpew(code);
#endif
 BufferOffset bo = writeInst(code.encode());
 writeInst(nextInChain);
 if (!oom()) {
   label->use(bo.getOffset());
 }
 // Leave space for potential long jump.
 as_nop();
 as_nop();
 as_nop();
 as_nop();
 if (conditional) {
   as_nop();
 }
}
   1213 
// rd = (rs <c> imm) ? 1 : 0, for a 64-bit immediate.
void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm,
                                     Condition c) {
 if (imm.value <= INT32_MAX) {
   // Fits the Imm32 overload, which may use immediate compare forms.
   ma_cmp_set(rd, rs, Imm32(uint32_t(imm.value)), c);
 } else {
   UseScratchRegisterScope temps(*this);
   Register scratch = temps.Acquire();
   ma_li(scratch, imm);
   ma_cmp_set(rd, rs, scratch, c);
 }
}
   1225 
   1226 void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm,
   1227                                      Condition c) {
   1228  ma_cmp_set(rd, rs, ImmWord(uintptr_t(imm.value)), c);
   1229 }
   1230 
// rd = (rs <c> imm) ? 1 : 0, for a GC-pointer immediate.
void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmGCPtr imm,
                                     Condition c) {
 UseScratchRegisterScope temps(*this);
 Register scratch = temps.Acquire();
 ma_li(scratch, imm);
 ma_cmp_set(rd, rs, scratch, c);
}
   1238 
// rd = (*address <c> rt) ? 1 : 0, with a 64-bit load of the memory operand.
void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Address address, Register rt,
                                     Condition c) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();
 ma_load(scratch2, address, SizeDouble);
 ma_cmp_set(rd, scratch2, rt, c);
}
   1246 
// rd = (*address <c> imm) ? 1 : 0, with a 64-bit load of the memory operand.
void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Address address, ImmWord imm,
                                     Condition c) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();
 ma_load(scratch2, address, SizeDouble);
 ma_cmp_set(rd, scratch2, imm, c);
}
   1254 
// rd = (*address <c> imm) ? 1 : 0. Loads 32 bits sign-extended so the
// comparison against the (sign-extended) Imm32 is consistent.
void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Address address, Imm32 imm,
                                     Condition c) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();
 ma_load(scratch2, address, SizeWord, SignExtend);
 ma_cmp_set(rd, scratch2, imm, c);
}
   1262 
// fp instructions
// Load the double constant |value| into the FP register |dest|.
void MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value) {
 ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));

 if (imm.value != 0) {
   UseScratchRegisterScope temps(*this);
   Register scratch = temps.Acquire();
   ma_li(scratch, imm);
   moveToDouble(scratch, dest);
 } else {
   // +0.0 is an all-zero bit pattern; move the zero register directly.
   moveToDouble(zero, dest);
 }
}
   1276 
// Move the raw 64 bits of FP register |src| into the value register.
void MacroAssemblerMIPS64::ma_mv(FloatRegister src, ValueOperand dest) {
 as_dmfc1(dest.valueReg(), src);
}
   1280 
// Move the raw 64 bits of the value register |src| into FP register |dest|.
void MacroAssemblerMIPS64::ma_mv(ValueOperand src, FloatRegister dest) {
 as_dmtc1(src.valueReg(), dest);
}
   1284 
// Load a single-precision float from |address| into |ft|. Returns the
// offset of the potentially-faulting load instruction.
FaultingCodeOffset MacroAssemblerMIPS64::ma_ls(FloatRegister ft,
                                              Address address) {
 UseScratchRegisterScope temps(*this);
 FaultingCodeOffset fco;
 if (Imm16::IsInSignedRange(address.offset)) {
   fco = FaultingCodeOffset(currentOffset());
   as_lwc1(ft, address.base, address.offset);
 } else {
   // Offset does not fit the 16-bit displacement field.
   Register scratch = temps.Acquire();
   MOZ_ASSERT(address.base != scratch);
   ma_li(scratch, Imm32(address.offset));
   if (isLoongson()) {
     // Indexed FP load avoids the explicit address add.
     fco = FaultingCodeOffset(currentOffset());
     as_gslsx(ft, address.base, scratch, 0);
   } else {
     as_daddu(scratch, address.base, scratch);
     fco = FaultingCodeOffset(currentOffset());
     as_lwc1(ft, scratch, 0);
   }
 }
 return fco;
}
   1307 
// Load a double from |address| into |ft|. Returns the offset of the
// potentially-faulting load instruction.
FaultingCodeOffset MacroAssemblerMIPS64::ma_ld(FloatRegister ft,
                                              Address address) {
 UseScratchRegisterScope temps(*this);
 FaultingCodeOffset fco;
 if (Imm16::IsInSignedRange(address.offset)) {
   fco = FaultingCodeOffset(currentOffset());
   as_ldc1(ft, address.base, address.offset);
 } else {
   // Offset does not fit the 16-bit displacement field.
   Register scratch = temps.Acquire();
   MOZ_ASSERT(address.base != scratch);
   ma_li(scratch, Imm32(address.offset));
   if (isLoongson()) {
     // Indexed FP load avoids the explicit address add.
     fco = FaultingCodeOffset(currentOffset());
     as_gsldx(ft, address.base, scratch, 0);
   } else {
     as_daddu(scratch, address.base, scratch);
     fco = FaultingCodeOffset(currentOffset());
     as_ldc1(ft, scratch, 0);
   }
 }
 return fco;
}
   1330 
// Store the double in |ft| to |address|. Returns the offset of the
// potentially-faulting store instruction.
FaultingCodeOffset MacroAssemblerMIPS64::ma_sd(FloatRegister ft,
                                              Address address) {
 UseScratchRegisterScope temps(*this);
 FaultingCodeOffset fco;
 if (Imm16::IsInSignedRange(address.offset)) {
   fco = FaultingCodeOffset(currentOffset());
   as_sdc1(ft, address.base, address.offset);
 } else {
   // Offset does not fit the 16-bit displacement field.
   Register scratch = temps.Acquire();
   MOZ_ASSERT(address.base != scratch);
   ma_li(scratch, Imm32(address.offset));
   if (isLoongson()) {
     // Indexed FP store avoids the explicit address add.
     fco = FaultingCodeOffset(currentOffset());
     as_gssdx(ft, address.base, scratch, 0);
   } else {
     as_daddu(scratch, address.base, scratch);
     fco = FaultingCodeOffset(currentOffset());
     as_sdc1(ft, scratch, 0);
   }
 }
 return fco;
}
   1353 
// Store the single-precision float in |ft| to |address|. Returns the
// offset of the potentially-faulting store instruction.
FaultingCodeOffset MacroAssemblerMIPS64::ma_ss(FloatRegister ft,
                                              Address address) {
 UseScratchRegisterScope temps(*this);
 FaultingCodeOffset fco;
 if (Imm16::IsInSignedRange(address.offset)) {
   fco = FaultingCodeOffset(currentOffset());
   as_swc1(ft, address.base, address.offset);
 } else {
   // Offset does not fit the 16-bit displacement field.
   Register scratch = temps.Acquire();
   MOZ_ASSERT(address.base != scratch);
   ma_li(scratch, Imm32(address.offset));
   if (isLoongson()) {
     // Indexed FP store avoids the explicit address add.
     fco = FaultingCodeOffset(currentOffset());
     as_gsssx(ft, address.base, scratch, 0);
   } else {
     as_daddu(scratch, address.base, scratch);
     fco = FaultingCodeOffset(currentOffset());
     as_swc1(ft, scratch, 0);
   }
 }
 return fco;
}
   1376 
// Pop an FP register (double or single) off the stack.
void MacroAssemblerMIPS64::ma_pop(FloatRegister f) {
 if (f.isDouble()) {
   as_ldc1(f, StackPointer, 0);
 } else {
   MOZ_ASSERT(f.isSingle(), "simd128 not supported");
   as_lwc1(f, StackPointer, 0);
 }
 // The stack slot is always sizeof(double), even for singles.
 as_daddiu(StackPointer, StackPointer, sizeof(double));
}
   1386 
// Push an FP register (double or single) onto the stack.
void MacroAssemblerMIPS64::ma_push(FloatRegister f) {
 // The stack slot is always sizeof(double), even for singles.
 as_daddiu(StackPointer, StackPointer, -int32_t(sizeof(double)));
 if (f.isDouble()) {
   as_sdc1(f, StackPointer, 0);
 } else {
   MOZ_ASSERT(f.isSingle(), "simd128 not supported");
   as_swc1(f, StackPointer, 0);
 }
}
   1396 
// Push a fake IonJS exit frame (descriptor, return address, frame pointer)
// for out-of-line code. Always returns true.
bool MacroAssemblerMIPS64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
 asMasm().Push(FrameDescriptor(FrameType::IonJS));  // descriptor_
 asMasm().Push(ImmPtr(fakeReturnAddr));
 asMasm().Push(FramePointer);
 return true;
}
   1403 
void MacroAssemblerMIPS64Compat::move32(Imm32 imm, Register dest) {
 ma_li(dest, imm);
}

// An sll with a zero shift amount keeps the low 32 bits, canonicalizing
// (sign-extending) a 32-bit value on MIPS64.
void MacroAssemblerMIPS64Compat::move32(Register src, Register dest) {
 ma_sll(dest, src, Imm32(0));
}

void MacroAssemblerMIPS64Compat::movePtr(Register src, Register dest) {
 ma_move(dest, src);
}
void MacroAssemblerMIPS64Compat::movePtr(ImmWord imm, Register dest) {
 ma_li(dest, imm);
}

void MacroAssemblerMIPS64Compat::movePtr(ImmGCPtr imm, Register dest) {
 ma_li(dest, imm);
}

void MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest) {
 movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
// Load a wasm symbolic address via a patchable sequence; the -1
// placeholder is fixed up when the symbolic access is resolved.
void MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm,
                                        Register dest) {
 append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
 ma_liPatchable(dest, ImmWord(-1));
}
   1431 
// 8- and 16-bit loads: thin wrappers over ma_load with the appropriate
// size/extension pair. Each returns the faulting-code offset of the load.
FaultingCodeOffset MacroAssemblerMIPS64Compat::load8ZeroExtend(
   const Address& address, Register dest) {
 return ma_load(dest, address, SizeByte, ZeroExtend);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::load8ZeroExtend(
   const BaseIndex& src, Register dest) {
 return ma_load(dest, src, SizeByte, ZeroExtend);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::load8SignExtend(
   const Address& address, Register dest) {
 return ma_load(dest, address, SizeByte, SignExtend);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::load8SignExtend(
   const BaseIndex& src, Register dest) {
 return ma_load(dest, src, SizeByte, SignExtend);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::load16ZeroExtend(
   const Address& address, Register dest) {
 return ma_load(dest, address, SizeHalfWord, ZeroExtend);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::load16ZeroExtend(
   const BaseIndex& src, Register dest) {
 return ma_load(dest, src, SizeHalfWord, ZeroExtend);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::load16SignExtend(
   const Address& address, Register dest) {
 return ma_load(dest, address, SizeHalfWord, SignExtend);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::load16SignExtend(
   const BaseIndex& src, Register dest) {
 return ma_load(dest, src, SizeHalfWord, SignExtend);
}
   1471 
// 32-bit loads. The Absolute/Symbolic-address variants first materialize
// the pointer into a scratch register.
FaultingCodeOffset MacroAssemblerMIPS64Compat::load32(const Address& address,
                                                     Register dest) {
 return ma_load(dest, address, SizeWord);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::load32(const BaseIndex& address,
                                                     Register dest) {
 return ma_load(dest, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::load32(AbsoluteAddress address,
                                       Register dest) {
 UseScratchRegisterScope temps(*this);
 Register scratch = temps.Acquire();
 movePtr(ImmPtr(address.addr), scratch);
 load32(Address(scratch, 0), dest);
}

void MacroAssemblerMIPS64Compat::load32(wasm::SymbolicAddress address,
                                       Register dest) {
 UseScratchRegisterScope temps(*this);
 Register scratch = temps.Acquire();
 movePtr(address, scratch);
 load32(Address(scratch, 0), dest);
}
   1497 
// Pointer-sized (64-bit) loads. The Absolute/Symbolic-address variants
// first materialize the pointer into a scratch register.
FaultingCodeOffset MacroAssemblerMIPS64Compat::loadPtr(const Address& address,
                                                      Register dest) {
 return ma_load(dest, address, SizeDouble);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::loadPtr(const BaseIndex& src,
                                                      Register dest) {
 return ma_load(dest, src, SizeDouble);
}

void MacroAssemblerMIPS64Compat::loadPtr(AbsoluteAddress address,
                                        Register dest) {
 UseScratchRegisterScope temps(*this);
 Register scratch = temps.Acquire();
 movePtr(ImmPtr(address.addr), scratch);
 loadPtr(Address(scratch, 0), dest);
}

void MacroAssemblerMIPS64Compat::loadPtr(wasm::SymbolicAddress address,
                                        Register dest) {
 UseScratchRegisterScope temps(*this);
 Register scratch = temps.Acquire();
 movePtr(address, scratch);
 loadPtr(Address(scratch, 0), dest);
}

// Private values are stored as plain pointers on this platform.
void MacroAssemblerMIPS64Compat::loadPrivate(const Address& address,
                                            Register dest) {
 loadPtr(address, dest);
}
   1528 
// Load a possibly-unaligned 64-bit value with the ldl/ldr pair, record it
// as a faulting wasm access, then move the bits into the FP register.
void MacroAssemblerMIPS64Compat::loadUnalignedDouble(
   const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
   FloatRegister dest) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();
 computeScaledAddress(src, scratch2);
 BufferOffset load;
 // Both displacements (offset and offset + 7) must fit the signed-16
 // field for the direct form.
 if (Imm16::IsInSignedRange(src.offset) &&
     Imm16::IsInSignedRange(src.offset + 7)) {
   load = as_ldl(temp, scratch2, src.offset + 7);
   as_ldr(temp, scratch2, src.offset);
 } else {
   Register scratch = temps.Acquire();
   ma_li(scratch, Imm32(src.offset));
   as_daddu(scratch, scratch2, scratch);
   load = as_ldl(temp, scratch, 7);
   as_ldr(temp, scratch, 0);
 }
 // Only the first instruction of the pair is registered as the trap site.
 append(access, wasm::TrapMachineInsnForLoad(Scalar::byteSize(access.type())),
        FaultingCodeOffset(load.getOffset()));
 moveToDouble(temp, dest);
}
   1551 
// Load a possibly-unaligned 32-bit value with the lwl/lwr pair, record it
// as a faulting wasm access, then move the bits into the FP register.
void MacroAssemblerMIPS64Compat::loadUnalignedFloat32(
   const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
   FloatRegister dest) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();
 computeScaledAddress(src, scratch2);
 BufferOffset load;
 // Both displacements (offset and offset + 3) must fit the signed-16
 // field for the direct form.
 if (Imm16::IsInSignedRange(src.offset) &&
     Imm16::IsInSignedRange(src.offset + 3)) {
   load = as_lwl(temp, scratch2, src.offset + 3);
   as_lwr(temp, scratch2, src.offset);
 } else {
   Register scratch = temps.Acquire();
   ma_li(scratch, Imm32(src.offset));
   as_daddu(scratch, scratch2, scratch);
   load = as_lwl(temp, scratch, 3);
   as_lwr(temp, scratch, 0);
 }
 // Only the first instruction of the pair is registered as the trap site.
 append(access, wasm::TrapMachineInsnForLoad(Scalar::byteSize(access.type())),
        FaultingCodeOffset(load.getOffset()));
 moveToFloat32(temp, dest);
}
   1574 
// 8- and 16-bit stores: thin wrappers over ma_store. Immediate forms
// first materialize the value in a scratch register.
void MacroAssemblerMIPS64Compat::store8(Imm32 imm, const Address& address) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();
 ma_li(scratch2, imm);
 ma_store(scratch2, address, SizeByte);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::store8(Register src,
                                                     const Address& address) {
 return ma_store(src, address, SizeByte);
}

void MacroAssemblerMIPS64Compat::store8(Imm32 imm, const BaseIndex& dest) {
 ma_store(imm, dest, SizeByte);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::store8(Register src,
                                                     const BaseIndex& dest) {
 return ma_store(src, dest, SizeByte);
}

void MacroAssemblerMIPS64Compat::store16(Imm32 imm, const Address& address) {
 UseScratchRegisterScope temps(*this);
 Register scratch2 = temps.Acquire();
 ma_li(scratch2, imm);
 ma_store(scratch2, address, SizeHalfWord);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::store16(Register src,
                                                      const Address& address) {
 return ma_store(src, address, SizeHalfWord);
}

void MacroAssemblerMIPS64Compat::store16(Imm32 imm, const BaseIndex& dest) {
 ma_store(imm, dest, SizeHalfWord);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::store16(
   Register src, const BaseIndex& address) {
 return ma_store(src, address, SizeHalfWord);
}
   1616 
// 32-bit stores.  The AbsoluteAddress overload loads the pointer into a
// scratch register and delegates to the Address form; Register-source
// overloads return the FaultingCodeOffset of the emitted store.
void MacroAssemblerMIPS64Compat::store32(Register src,
                                         AbsoluteAddress address) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  movePtr(ImmPtr(address.addr), scratch);
  store32(src, Address(scratch, 0));
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::store32(Register src,
                                                       const Address& address) {
  return ma_store(src, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  // Materialise the immediate before storing.
  move32(src, scratch2);
  ma_store(scratch2, address, SizeWord);
}

void MacroAssemblerMIPS64Compat::store32(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeWord);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::store32(Register src,
                                                       const BaseIndex& dest) {
  return ma_store(src, dest, SizeWord);
}
   1645 
// Pointer-sized (64-bit) stores.  The templates are explicitly instantiated
// for Address and BaseIndex destinations only; ImmPtr and ImmGCPtr forms
// funnel through the ImmWord/Register forms.
template <typename T>
void MacroAssemblerMIPS64Compat::storePtr(ImmWord imm, T address) {
  ma_store(imm, address, SizeDouble);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmWord imm,
                                                            Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
    ImmWord imm, BaseIndex address);

template <typename T>
void MacroAssemblerMIPS64Compat::storePtr(ImmPtr imm, T address) {
  // Reinterpret the pointer bits as a plain word and reuse the ImmWord form.
  storePtr(ImmWord(uintptr_t(imm.value)), address);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmPtr imm,
                                                            Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
    ImmPtr imm, BaseIndex address);

template <typename T>
void MacroAssemblerMIPS64Compat::storePtr(ImmGCPtr imm, T address) {
  // movePtr(ImmGCPtr, ...) records the GC-pointer relocation; then store
  // the materialised value.
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  movePtr(imm, scratch2);
  storePtr(scratch2, address);
}

template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmGCPtr imm,
                                                            Address address);
template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
    ImmGCPtr imm, BaseIndex address);

FaultingCodeOffset MacroAssemblerMIPS64Compat::storePtr(
    Register src, const Address& address) {
  return ma_store(src, address, SizeDouble);
}

FaultingCodeOffset MacroAssemblerMIPS64Compat::storePtr(
    Register src, const BaseIndex& address) {
  return ma_store(src, address, SizeDouble);
}

void MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  movePtr(ImmPtr(dest.addr), scratch);
  storePtr(src, Address(scratch, 0));
}
   1695 
// Store a float32 to a potentially unaligned wasm address using the
// swl/swr instruction pair.  |temp| receives the raw 32-bit pattern of
// |src|.  Only the first instruction of the pair (swl) is recorded via
// append() as the faulting offset for the wasm trap machinery.
void MacroAssemblerMIPS64Compat::storeUnalignedFloat32(
    const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
    const BaseIndex& dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  computeScaledAddress(dest, scratch2);
  moveFromFloat32(src, temp);
  BufferOffset store;
  // Both ends of the 4-byte access must fit a signed 16-bit displacement.
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + 3)) {
    store = as_swl(temp, scratch2, dest.offset + 3);
    as_swr(temp, scratch2, dest.offset);
  } else {
    // Offset out of range: fold it into the base address explicitly.
    Register scratch = temps.Acquire();
    ma_li(scratch, Imm32(dest.offset));
    as_daddu(scratch, scratch2, scratch);
    store = as_swl(temp, scratch, 3);
    as_swr(temp, scratch, 0);
  }
  append(access, wasm::TrapMachineInsnForStore(Scalar::byteSize(access.type())),
         FaultingCodeOffset(store.getOffset()));
}
   1718 
// Store a double to a potentially unaligned wasm address using the
// sdl/sdr instruction pair (8-byte analogue of storeUnalignedFloat32).
// |temp| receives the raw 64-bit pattern of |src|; the sdl is recorded as
// the faulting offset.
void MacroAssemblerMIPS64Compat::storeUnalignedDouble(
    const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
    const BaseIndex& dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  computeScaledAddress(dest, scratch2);
  moveFromDouble(src, temp);

  BufferOffset store;
  // Both ends of the 8-byte access must fit a signed 16-bit displacement.
  if (Imm16::IsInSignedRange(dest.offset) &&
      Imm16::IsInSignedRange(dest.offset + 7)) {
    store = as_sdl(temp, scratch2, dest.offset + 7);
    as_sdr(temp, scratch2, dest.offset);
  } else {
    // Offset out of range: fold it into the base address explicitly.
    Register scratch = temps.Acquire();
    ma_li(scratch, Imm32(dest.offset));
    as_daddu(scratch, scratch2, scratch);
    store = as_sdl(temp, scratch, 7);
    as_sdr(temp, scratch, 0);
  }
  append(access, wasm::TrapMachineInsnForStore(Scalar::byteSize(access.type())),
         FaultingCodeOffset(store.getOffset()));
}
   1742 
// Clamp a double to an integer in [0, 255] (Uint8ClampedArray semantics):
// round to the nearest int32, then saturate below 0 to 0 and above 255 to
// 255.  Two code paths: a branchless select sequence on MIPSR6, and
// conditional moves (movz/movf) on earlier revisions.
void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
  as_roundwd(ScratchDoubleReg, input);  // round to nearest int32
  ma_li(scratch, Imm32(255));           // saturation upper bound
  as_mfc1(output, ScratchDoubleReg);    // move rounded result to GPR
#ifdef MIPSR6
  as_slti(scratch2, output, 0);         // scratch2 = (output < 0)
  as_seleqz(output, output, scratch2);  // output = 0 if negative
  as_sltiu(scratch2, output, 255);      // scratch2 = (output < 255)
  as_selnez(output, output, scratch2);  // keep output if below 255...
  as_seleqz(scratch, scratch, scratch2);  // ...else select 255
  as_or(output, output, scratch);
#else
  zeroDouble(ScratchDoubleReg);
  as_sltiu(scratch2, output, 255);
  // Sets the FP condition flag to (0.0 < input); consumed by as_movf below.
  as_colt(DoubleFloat, ScratchDoubleReg, input);
  // if res > 255; res = 255;
  as_movz(output, scratch, scratch2);
  // if !(input > 0); res = 0;
  as_movf(output, zero);
#endif
}
   1767 
// test{Null,Object,Undefined}Set: extract the tag from |value| and set
// |dest| to the boolean result of comparing it against the respective tag
// with |cond| (Equal or NotEqual only).
void MacroAssemblerMIPS64Compat::testNullSet(Condition cond,
                                             const ValueOperand& value,
                                             Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  splitTag(value, scratch2);
  ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_NULL), cond);
}

void MacroAssemblerMIPS64Compat::testObjectSet(Condition cond,
                                               const ValueOperand& value,
                                               Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  splitTag(value, scratch2);
  ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_OBJECT), cond);
}

void MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond,
                                                  const ValueOperand& value,
                                                  Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  splitTag(value, scratch2);
  ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_UNDEFINED), cond);
}
   1797 
// unboxInt32: extract the int32 payload from the low 32 bits of a boxed
// Value.  A shift-left by 0 (sll) on MIPS64 sign-extends the low word into
// the full 64-bit register, which is the canonical int32 representation.
void MacroAssemblerMIPS64Compat::unboxInt32(const ValueOperand& operand,
                                            Register dest) {
  ma_sll(dest, operand.valueReg(), Imm32(0));
}

void MacroAssemblerMIPS64Compat::unboxInt32(Register src, Register dest) {
  ma_sll(dest, src, Imm32(0));
}

void MacroAssemblerMIPS64Compat::unboxInt32(const Address& src, Register dest) {
  load32(Address(src.base, src.offset), dest);
}

void MacroAssemblerMIPS64Compat::unboxInt32(const BaseIndex& src,
                                            Register dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  computeScaledAddress(src, scratch2);
  load32(Address(scratch2, src.offset), dest);
}

// unboxBoolean: extract the boolean payload.  Unlike int32, the low 32 bits
// are zero-extended (dext) rather than sign-extended.
void MacroAssemblerMIPS64Compat::unboxBoolean(const ValueOperand& operand,
                                              Register dest) {
  ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(32));
}

void MacroAssemblerMIPS64Compat::unboxBoolean(Register src, Register dest) {
  ma_dext(dest, src, Imm32(0), Imm32(32));
}

void MacroAssemblerMIPS64Compat::unboxBoolean(const Address& src,
                                              Register dest) {
  ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
}

void MacroAssemblerMIPS64Compat::unboxBoolean(const BaseIndex& src,
                                              Register dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  computeScaledAddress(src, scratch2);
  ma_load(dest, Address(scratch2, src.offset), SizeWord, ZeroExtend);
}
   1840 
// unboxDouble: a boxed double IS its raw 64-bit bit pattern, so unboxing is
// just moving the bits into an FP register (dmtc1) or loading them from
// memory.
void MacroAssemblerMIPS64Compat::unboxDouble(const ValueOperand& operand,
                                             FloatRegister dest) {
  as_dmtc1(operand.valueReg(), dest);
}

void MacroAssemblerMIPS64Compat::unboxDouble(const Address& src,
                                             FloatRegister dest) {
  ma_ld(dest, Address(src.base, src.offset));
}
void MacroAssemblerMIPS64Compat::unboxDouble(const BaseIndex& src,
                                             FloatRegister dest) {
  // Load the boxed bits into a GPR, then reuse the ValueOperand form.
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  loadPtr(src, scratch);
  unboxDouble(ValueOperand(scratch), dest);
}
   1857 
// Thin wrappers over unboxNonDouble for each pointer-payload value type
// (string, symbol, BigInt, object), in ValueOperand / Register / Address
// flavours.
void MacroAssemblerMIPS64Compat::unboxString(const ValueOperand& operand,
                                             Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerMIPS64Compat::unboxString(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerMIPS64Compat::unboxString(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
}

void MacroAssemblerMIPS64Compat::unboxSymbol(const ValueOperand& operand,
                                             Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerMIPS64Compat::unboxSymbol(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerMIPS64Compat::unboxSymbol(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
}

void MacroAssemblerMIPS64Compat::unboxBigInt(const ValueOperand& operand,
                                             Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
}

void MacroAssemblerMIPS64Compat::unboxBigInt(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
}

void MacroAssemblerMIPS64Compat::unboxBigInt(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
}

void MacroAssemblerMIPS64Compat::unboxObject(const ValueOperand& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}

void MacroAssemblerMIPS64Compat::unboxObject(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}

void MacroAssemblerMIPS64Compat::unboxObject(const Address& src,
                                             Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}
   1913 
// Unbox a Value into either an FP or a GP register.  For an FP destination
// the value may be an int32 (converted to double) or an actual double; for
// a GP destination it is unboxed as the statically-known |type|.
void MacroAssemblerMIPS64Compat::unboxValue(const ValueOperand& src,
                                            AnyRegister dest,
                                            JSValueType type) {
  if (dest.isFloat()) {
    Label notInt32, end;
    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
    // Int32 case: convert the payload to double.
    convertInt32ToDouble(src.valueReg(), dest.fpu());
    ma_b(&end, ShortJump);
    bind(&notInt32);
    // Otherwise the raw bits already are a double.
    unboxDouble(src, dest.fpu());
    bind(&end);
  } else {
    unboxNonDouble(src, dest.gpr(), type);
  }
}
   1929 
// Box a double: its bit pattern is its boxed form, so just move the 64 FP
// bits into the value register.  The trailing FloatRegister parameter is a
// scratch required by the cross-platform signature and unused here.
void MacroAssemblerMIPS64Compat::boxDouble(FloatRegister src,
                                           const ValueOperand& dest,
                                           FloatRegister) {
  as_dmfc1(dest.valueReg(), src);
}
   1935 
#ifdef DEBUG
// Number of payload bits a value of |type| may legitimately use; bits above
// this must be zero.  Consumed only by AssertValidPayload below.  Double
// and "unknown" types have no fixed payload width and crash.
static constexpr int32_t PayloadSize(JSValueType type) {
  switch (type) {
    case JSVAL_TYPE_UNDEFINED:
    case JSVAL_TYPE_NULL:
      return 0;  // no payload at all
    case JSVAL_TYPE_BOOLEAN:
      return 1;  // 0 or 1 only
    case JSVAL_TYPE_INT32:
    case JSVAL_TYPE_MAGIC:
      return 32;
    case JSVAL_TYPE_STRING:
    case JSVAL_TYPE_SYMBOL:
    case JSVAL_TYPE_PRIVATE_GCTHING:
    case JSVAL_TYPE_BIGINT:
    case JSVAL_TYPE_OBJECT:
      // Pointer payloads occupy the low JSVAL_TAG_SHIFT bits.
      return JSVAL_TAG_SHIFT;
    case JSVAL_TYPE_DOUBLE:
    case JSVAL_TYPE_UNKNOWN:
      break;
  }
  MOZ_CRASH("bad value type");
}
#endif
   1960 
// Debug-only sanity check that |payload| is a well-formed payload for
// |type| before it is boxed: int32 payloads must be properly sign-extended
// to 64 bits, all other types must have every bit above PayloadSize(type)
// zeroed.  Emits a breakpoint (as_break) on violation.  No-op in release
// builds.
static void AssertValidPayload(MacroAssemblerMIPS64Compat& masm,
                               JSValueType type, Register payload,
                               Register scratch) {
#ifdef DEBUG
  if (type == JSVAL_TYPE_INT32) {
    // Ensure the payload is a properly sign-extended int32.
    Label signExtended;
    // sll with shift 0 sign-extends the low 32 bits; compare with original.
    masm.ma_sll(scratch, payload, Imm32(0));
    masm.ma_b(payload, scratch, &signExtended, Assembler::Equal, ShortJump);
    masm.breakpoint();
    masm.bind(&signExtended);
  } else {
    // All bits above the payload must be zeroed.
    Label zeroed;
    masm.ma_dsrl(scratch, payload, Imm32(PayloadSize(type)));
    // Branch taken when the shifted-out remainder is zero.
    masm.ma_b(scratch, scratch, &zeroed, Assembler::Zero, ShortJump);
    masm.breakpoint();
    masm.bind(&zeroed);
  }
#endif
}
   1982 
// Box |src| with the statically-known |type| into |dest|.  The shifted tag
// is materialised first, then the payload is merged in: int32 payloads are
// inserted into the low 32 bits with dins (which leaves the tag bits
// intact), other payloads are known zero above PayloadSize and can simply
// be OR-ed.  |src| and |dest| must differ since |dest| doubles as the
// AssertValidPayload scratch.
void MacroAssemblerMIPS64Compat::boxValue(JSValueType type, Register src,
                                          Register dest) {
  MOZ_ASSERT(type != JSVAL_TYPE_UNDEFINED && type != JSVAL_TYPE_NULL);
  MOZ_ASSERT(src != dest);

  AssertValidPayload(*this, type, src, dest);

  ma_li(dest, ImmShiftedTag(type));

  if (type == JSVAL_TYPE_INT32) {
    ma_dins(dest, src, Imm32(0), Imm32(32));
  } else {
    as_or(dest, dest, src);
  }
}
   1998 
// Box |src| with a runtime |type| into |dest|: build the shifted tag from
// the type byte (OR with JSVAL_TAG_MAX_DOUBLE, shift into tag position),
// then insert the low 32 payload bits with dins.  Only types whose payload
// fits in 32 bits are supported — GC-thing payloads would be truncated —
// which the DEBUG block below enforces with emitted checks.
void MacroAssemblerMIPS64Compat::boxValue(Register type, Register src,
                                          Register dest) {
  MOZ_ASSERT(src != dest);

#ifdef DEBUG
  Label done, isNullOrUndefined, isBoolean, isInt32OrMagic, isPointerSized;

  asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_NULL),
                    &isNullOrUndefined);
  asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_UNDEFINED),
                    &isNullOrUndefined);
  asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_BOOLEAN),
                    &isBoolean);
  asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_INT32),
                    &isInt32OrMagic);
  asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_MAGIC),
                    &isInt32OrMagic);
  // GCThing types aren't currently supported, because ma_dins truncates
  // payloads above UINT32_MAX.
  breakpoint();
  {
    bind(&isNullOrUndefined);

    // Ensure no payload for null and undefined.
    ma_b(src, src, &done, Assembler::Zero, ShortJump);
    breakpoint();
  }
  {
    bind(&isBoolean);

    // Ensure boolean values are either 0 or 1.
    ma_b(src, Imm32(1), &done, Assembler::BelowOrEqual, ShortJump);
    breakpoint();
  }
  {
    bind(&isInt32OrMagic);

    // Ensure |src| is sign-extended.
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    ma_sll(scratch, src, Imm32(0));
    ma_b(src, scratch, &done, Assembler::Equal, ShortJump);
    breakpoint();
  }
  bind(&done);
#endif

  // tag = (type | JSVAL_TAG_MAX_DOUBLE) << JSVAL_TAG_SHIFT, then insert the
  // 32-bit payload into the low bits without disturbing the tag.
  ma_or(dest, type, Imm32(JSVAL_TAG_MAX_DOUBLE));
  ma_dsll(dest, dest, Imm32(JSVAL_TAG_SHIFT));
  ma_dins(dest, src, Imm32(0), Imm32(32));
}
   2050 
// Load the float constant |f| into |dest|.
void MacroAssemblerMIPS64Compat::loadConstantFloat32(float f,
                                                     FloatRegister dest) {
  ma_lis(dest, f);
}
   2055 
// Load a numeric Value from |src| into an FP register: if the boxed value
// is an int32 it is converted to double, otherwise its bits are loaded as
// a double directly.
void MacroAssemblerMIPS64Compat::loadInt32OrDouble(const Address& src,
                                                   FloatRegister dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();

  Label notInt32, end;
  {
    // Inlined |branchTestInt32| to use a short-jump.
    Register tag = extractTag(src, scratch);
    ma_b(tag, ImmTag(JSVAL_TAG_INT32), &notInt32, Assembler::NotEqual,
         ShortJump);
  }
  {
    // If it's an int, convert it to double.
    unboxInt32(src, scratch);
    convertInt32ToDouble(scratch, dest);
    ma_b(&end, ShortJump);
  }
  bind(&notInt32);
  {
    // Not an int, just load as double.
    unboxDouble(src, dest);
  }
  bind(&end);
}
   2081 
// BaseIndex variant: resolve base+index*scale, then reuse the Address form.
void MacroAssemblerMIPS64Compat::loadInt32OrDouble(const BaseIndex& addr,
                                                   FloatRegister dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();

  computeScaledAddress(addr, scratch);
  loadInt32OrDouble(Address(scratch, addr.offset), dest);
}
   2090 
// Load the double constant |dp| into |dest|.
void MacroAssemblerMIPS64Compat::loadConstantDouble(double dp,
                                                    FloatRegister dest) {
  ma_lid(dest, dp);
}
   2095 
// Extract the object pointer from a boxed Value in memory: the payload is
// the low JSVAL_TAG_SHIFT bits (dext zero-extends the rest away).  Returns
// |scratch|, which holds the result.
Register MacroAssemblerMIPS64Compat::extractObject(const Address& address,
                                                   Register scratch) {
  loadPtr(address, scratch);
  ma_dext(scratch, scratch, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
  return scratch;
}

// Extract the tag: the high (64 - JSVAL_TAG_SHIFT) bits of the boxed word.
Register MacroAssemblerMIPS64Compat::extractTag(const Address& address,
                                                Register scratch) {
  loadPtr(address, scratch);
  ma_dext(scratch, scratch, Imm32(JSVAL_TAG_SHIFT),
          Imm32(64 - JSVAL_TAG_SHIFT));
  return scratch;
}

Register MacroAssemblerMIPS64Compat::extractTag(const BaseIndex& address,
                                                Register scratch) {
  // Resolve base+index*scale first, then reuse the Address form.
  computeScaledAddress(address, scratch);
  return extractTag(Address(scratch, address.offset), scratch);
}
   2116 
   2117 /////////////////////////////////////////////////////////////////
   2118 // X86/X64-common/ARM/MIPS interface.
   2119 /////////////////////////////////////////////////////////////////
// storeValue(Operand / BaseIndex forms): all funnel into the Address form
// after resolving the effective address into a scratch register.
void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, Operand dst) {
  storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
}

void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val,
                                            const BaseIndex& dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  computeScaledAddress(dest, scratch2);
  storeValue(val, Address(scratch2, dest.offset));
}

void MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg,
                                            BaseIndex dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  computeScaledAddress(dest, scratch);

  int32_t offset = dest.offset;
  // Fold an un-encodable offset into the base address so the final store
  // can use a 16-bit displacement.
  if (!Imm16::IsInSignedRange(offset)) {
    Register scratch2 = temps.Acquire();
    ma_li(scratch2, Imm32(offset));
    as_daddu(scratch, scratch, scratch2);
    offset = 0;
  }

  storeValue(type, reg, Address(scratch, offset));
}
   2148 
// Store a boxed Value register to memory: a single 64-bit store.
void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val,
                                            const Address& dest) {
  storePtr(val.valueReg(), Address(dest.base, dest.offset));
}

// Store a typed payload register to memory.  For int32/boolean the payload
// and the (constant) tag word are written as two 32-bit stores, avoiding a
// full box; other types are boxed into a scratch register first.
void MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg,
                                            Address dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  MOZ_ASSERT(dest.base != scratch2);

  if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
    AssertValidPayload(*this, type, reg, scratch2);

    // Low word: the payload; high word: the upper half of the shifted tag.
    store32(reg, dest);
    JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
    move32(Imm64(tag).hi(), scratch2);
    store32(scratch2, Address(dest.base, dest.offset + 4));
  } else {
    boxValue(type, reg, scratch2);
    storePtr(scratch2, Address(dest.base, dest.offset));
  }
}
   2172 
// Store a constant Value.  GC-thing constants are materialised with a
// patchable move and a data relocation so the GC can update the pointer.
void MacroAssemblerMIPS64Compat::storeValue(const Value& val, Address dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  if (val.isGCThing()) {
    writeDataRelocation(val);
    movWithPatch(ImmWord(val.asRawBits()), scratch2);
  } else {
    ma_li(scratch2, ImmWord(val.asRawBits()));
  }
  storePtr(scratch2, Address(dest.base, dest.offset));
}

void MacroAssemblerMIPS64Compat::storeValue(const Value& val, BaseIndex dest) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  computeScaledAddress(dest, scratch);

  int32_t offset = dest.offset;
  // Fold an un-encodable offset into the base address (see the typed
  // BaseIndex overload above).
  if (!Imm16::IsInSignedRange(offset)) {
    Register scratch2 = temps.Acquire();
    ma_li(scratch2, Imm32(offset));
    as_daddu(scratch, scratch, scratch2);
    offset = 0;
  }
  storeValue(val, Address(scratch, offset));
}
   2199 
// Load a boxed Value from memory into a value register (one 64-bit load;
// the BaseIndex form resolves the effective address first).
void MacroAssemblerMIPS64Compat::loadValue(const BaseIndex& addr,
                                           ValueOperand val) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  computeScaledAddress(addr, scratch2);
  loadValue(Address(scratch2, addr.offset), val);
}

void MacroAssemblerMIPS64Compat::loadValue(Address src, ValueOperand val) {
  loadPtr(Address(src.base, src.offset), val.valueReg());
}
   2211 
// Box |payload| with the statically-known |type| into |dest|.  When payload
// and dest are distinct registers this delegates to boxNonDouble; when they
// alias, the tag is instead inserted in place with dins so the payload bits
// are never clobbered before use.
void MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload,
                                          ValueOperand dest) {
  MOZ_ASSERT(type != JSVAL_TYPE_UNDEFINED && type != JSVAL_TYPE_NULL);

  if (payload == dest.valueReg()) {
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    MOZ_ASSERT(dest.valueReg() != scratch);

    AssertValidPayload(*this, type, payload, scratch);

    switch (type) {
      case JSVAL_TYPE_BOOLEAN:
      case JSVAL_TYPE_INT32:
      case JSVAL_TYPE_MAGIC: {
        // 32-bit payload: the whole upper word is tag, so insert the upper
        // 32 bits of the shifted tag at bit 32.
        int64_t shifted = int64_t(JSVAL_TYPE_TO_SHIFTED_TAG(type)) >> 32;

        // Load upper 32 bits of shifted tag into scratch register.
        ma_li(scratch, Imm32(shifted));

        // Insert tag into the result.
        ma_dins(dest.valueReg(), scratch, Imm32(32), Imm32(32));
        return;
      }
      case JSVAL_TYPE_STRING:
      case JSVAL_TYPE_SYMBOL:
      case JSVAL_TYPE_PRIVATE_GCTHING:
      case JSVAL_TYPE_BIGINT:
      case JSVAL_TYPE_OBJECT: {
        // Pointer payload: only the top (64 - JSVAL_TAG_SHIFT) bits are
        // tag; insert the sign-extended shifted tag there.
        int64_t signExtendedShiftedTag =
            int64_t(JSVAL_TYPE_TO_SHIFTED_TAG(type)) >> JSVAL_TAG_SHIFT;
        MOZ_ASSERT(Imm16::IsInSignedRange(signExtendedShiftedTag),
                   "sign-extended shifted tag can be materialised in a single "
                   "daddiu instruction");

        // Store sign-extended tag into lower 17 bits of the scratch register.
        as_daddiu(scratch, zero, signExtendedShiftedTag);

        // Insert tag into the result.
        ma_dins(dest.valueReg(), scratch, Imm32(JSVAL_TAG_SHIFT),
                Imm32(64 - JSVAL_TAG_SHIFT));
        return;
      }
      case JSVAL_TYPE_DOUBLE:
      case JSVAL_TYPE_UNDEFINED:
      case JSVAL_TYPE_NULL:
      case JSVAL_TYPE_UNKNOWN:
        break;
    }
    MOZ_CRASH("bad value type");
  } else {
    boxNonDouble(type, payload, dest);
  }
}
   2266 
// Push/pop a boxed Value on the machine stack (one 8-byte slot).
void MacroAssemblerMIPS64Compat::pushValue(ValueOperand val) {
  // Allocate stack slots for Value. One for each.
  asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
  // Store Value
  storeValue(val, Address(StackPointer, 0));
}

void MacroAssemblerMIPS64Compat::pushValue(const Address& addr) {
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  // Load the value before adjusting the stack: addr.base may be sp itself,
  // in which case moving sp first would invalidate the address.
  loadPtr(Address(addr.base, addr.offset), scratch);
  ma_dsubu(StackPointer, StackPointer, Imm32(sizeof(Value)));
  storePtr(scratch, Address(StackPointer, 0));
}

void MacroAssemblerMIPS64Compat::popValue(ValueOperand val) {
  as_ld(val.valueReg(), StackPointer, 0);
  as_daddiu(StackPointer, StackPointer, sizeof(Value));
}
   2287 
   2288 void MacroAssemblerMIPS64Compat::breakpoint() { as_break(0); }
   2289 
// Debug-only runtime check that sp is ABIStackAlignment-aligned; emits a
// break with BREAK_STACK_UNALIGNED if not.  No-op in release builds.
void MacroAssemblerMIPS64Compat::checkStackAlignment() {
#ifdef DEBUG
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  Label aligned;
  // scratch = sp & (ABIStackAlignment - 1); zero iff aligned.
  as_andi(scratch, sp, ABIStackAlignment - 1);
  ma_b(scratch, zero, &aligned, Equal, ShortJump);
  as_break(BREAK_STACK_UNALIGNED);
  bind(&aligned);
#endif
}
   2301 
   2302 void MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(
   2303    Label* profilerExitTail, Label* bailoutTail,
   2304    uint32_t* returnValueCheckOffset) {
   2305  // Reserve space for exception information.
   2306  int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
   2307             ~(ABIStackAlignment - 1);
   2308  asMasm().subPtr(Imm32(size), StackPointer);
   2309  ma_move(a0, StackPointer);  // Use a0 since it is a first function argument
   2310 
   2311  // Call the handler.
   2312  using Fn = void (*)(ResumeFromException* rfe);
   2313  asMasm().setupUnalignedABICall(a1);
   2314  asMasm().passABIArg(a0);
   2315  asMasm().callWithABI<Fn, HandleException>(
   2316      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
   2317 
   2318  *returnValueCheckOffset = asMasm().currentOffset();
   2319 
   2320  Label entryFrame;
   2321  Label catch_;
   2322  Label finally;
   2323  Label returnBaseline;
   2324  Label returnIon;
   2325  Label bailout;
   2326  Label wasmInterpEntry;
   2327  Label wasmCatch;
   2328 
   2329  // Already clobbered a0, so use it...
   2330  load32(Address(StackPointer, ResumeFromException::offsetOfKind()), a0);
   2331  asMasm().branch32(Assembler::Equal, a0,
   2332                    Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
   2333  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Catch),
   2334                    &catch_);
   2335  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Finally),
   2336                    &finally);
   2337  asMasm().branch32(Assembler::Equal, a0,
   2338                    Imm32(ExceptionResumeKind::ForcedReturnBaseline),
   2339                    &returnBaseline);
   2340  asMasm().branch32(Assembler::Equal, a0,
   2341                    Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
   2342  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Bailout),
   2343                    &bailout);
   2344  asMasm().branch32(Assembler::Equal, a0,
   2345                    Imm32(ExceptionResumeKind::WasmInterpEntry),
   2346                    &wasmInterpEntry);
   2347  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::WasmCatch),
   2348                    &wasmCatch);
   2349 
   2350  breakpoint();  // Invalid kind.
   2351 
   2352  // No exception handler. Load the error value, restore state and return from
   2353  // the entry frame.
   2354  bind(&entryFrame);
   2355  asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
   2356  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
   2357          FramePointer);
   2358  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
   2359          StackPointer);
   2360 
   2361  // We're going to be returning by the ion calling convention
   2362  ma_pop(ra);
   2363  as_jr(ra);
   2364  as_nop();
   2365 
   2366  // If we found a catch handler, this must be a baseline frame. Restore
   2367  // state and jump to the catch block.
   2368  bind(&catch_);
   2369  loadPtr(Address(StackPointer, ResumeFromException::offsetOfTarget()), a0);
   2370  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
   2371          FramePointer);
   2372  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
   2373          StackPointer);
   2374  jump(a0);
   2375 
   2376  // If we found a finally block, this must be a baseline frame. Push three
   2377  // values expected by the finally block: the exception, the exception stack,
   2378  // and BooleanValue(true).
   2379  bind(&finally);
   2380  ValueOperand exception = ValueOperand(a1);
   2381  loadValue(Address(sp, ResumeFromException::offsetOfException()), exception);
   2382 
   2383  ValueOperand exceptionStack = ValueOperand(a2);
   2384  loadValue(Address(sp, ResumeFromException::offsetOfExceptionStack()),
   2385            exceptionStack);
   2386 
   2387  loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a0);
   2388  loadPtr(Address(sp, ResumeFromException::offsetOfFramePointer()),
   2389          FramePointer);
   2390  loadPtr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp);
   2391 
   2392  pushValue(exception);
   2393  pushValue(exceptionStack);
   2394  pushValue(BooleanValue(true));
   2395  jump(a0);
   2396 
   2397  // Return BaselineFrame->returnValue() to the caller.
   2398  // Used in debug mode and for GeneratorReturn.
   2399  Label profilingInstrumentation;
   2400  bind(&returnBaseline);
   2401  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
   2402          FramePointer);
   2403  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
   2404          StackPointer);
   2405  loadValue(Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue()),
   2406            JSReturnOperand);
   2407  jump(&profilingInstrumentation);
   2408 
   2409  // Return the given value to the caller.
   2410  bind(&returnIon);
   2411  loadValue(Address(StackPointer, ResumeFromException::offsetOfException()),
   2412            JSReturnOperand);
   2413  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
   2414          FramePointer);
   2415  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
   2416          StackPointer);
   2417 
   2418  // If profiling is enabled, then update the lastProfilingFrame to refer to
   2419  // caller frame before returning. This code is shared by ForcedReturnIon
   2420  // and ForcedReturnBaseline.
   2421  bind(&profilingInstrumentation);
   2422  {
   2423    Label skipProfilingInstrumentation;
   2424    // Test if profiler enabled.
   2425    AbsoluteAddress addressOfEnabled(
   2426        asMasm().runtime()->geckoProfiler().addressOfEnabled());
   2427    asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
   2428                      &skipProfilingInstrumentation);
   2429    jump(profilerExitTail);
   2430    bind(&skipProfilingInstrumentation);
   2431  }
   2432 
   2433  ma_move(StackPointer, FramePointer);
   2434  pop(FramePointer);
   2435  ret();
   2436 
   2437  // If we are bailing out to baseline to handle an exception, jump to
   2438  // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
   2439  bind(&bailout);
   2440  loadPtr(Address(sp, ResumeFromException::offsetOfBailoutInfo()), a2);
   2441  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
   2442          StackPointer);
   2443  ma_li(ReturnReg, Imm32(1));
   2444  jump(bailoutTail);
   2445 
   2446  // Reset SP and FP; SP is pointing to the unwound return address to the wasm
   2447  // interpreter entry, so we can just ret().
   2448  bind(&wasmInterpEntry);
   2449  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
   2450          FramePointer);
   2451  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
   2452          StackPointer);
   2453  ma_li(InstanceReg, ImmWord(wasm::InterpFailInstanceReg));
   2454  ret();
   2455 
   2456  // Found a wasm catch handler, restore state and jump to it.
   2457  bind(&wasmCatch);
   2458  wasm::GenerateJumpToCatchHandler(asMasm(), sp, a1, a2);
   2459 }
   2460 
   2461 CodeOffset MacroAssemblerMIPS64Compat::toggledJump(Label* label) {
   2462  CodeOffset ret(nextOffset().getOffset());
   2463  ma_b(label);
   2464  return ret;
   2465 }
   2466 
// Emit a call to |target| that can later be toggled on (jalr + delay-slot
// nop) or off (two nops). The emitted sequence must always occupy exactly
// ToggledCallSize() bytes so that toggling can patch it in place.
CodeOffset MacroAssemblerMIPS64Compat::toggledCall(JitCode* target,
                                                   bool enabled) {
  UseScratchRegisterScope temps(*this);
  BufferOffset bo = nextOffset();
  CodeOffset offset(bo.getOffset());
  // Register the target for relocation when the code is moved by the GC.
  addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
  Register scratch = temps.Acquire();
  // Load the target address with a fixed-length (patchable) sequence.
  ma_liPatchable(scratch, ImmPtr(target->raw()));
  if (enabled) {
    as_jalr(scratch);
    as_nop();  // Branch delay slot.
  } else {
    // Same size as the enabled form, but does nothing.
    as_nop();
    as_nop();
  }
  // The toggling machinery relies on this exact size; verify it.
  MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
                            ToggledCallSize(nullptr));
  return offset;
}
   2486 
// Record |framePtr| as the most recent profiling frame on the current
// JitActivation and clear the last profiling call site. |scratch| is
// clobbered.
void MacroAssemblerMIPS64Compat::profilerEnterFrame(Register framePtr,
                                                    Register scratch) {
  asMasm().loadJSContext(scratch);
  // scratch <- cx->profilingActivation_
  loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
  storePtr(framePtr,
           Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
  storePtr(ImmPtr(nullptr),
           Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}
   2496 
// Tail-jump to the shared profiler exit-frame trampoline, which updates the
// profiler state before the frame actually returns.
void MacroAssemblerMIPS64Compat::profilerExitFrame() {
  jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
}
   2500 
   2501 void MacroAssembler::subFromStackPtr(Imm32 imm32) {
   2502  if (imm32.value) {
   2503    asMasm().subPtr(imm32, StackPointer);
   2504  }
   2505 }
   2506 
   2507 //{{{ check_macroassembler_style
   2508 // ===============================================================
   2509 // Stack manipulation functions.
   2510 
   2511 size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
   2512  return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
   2513 }
   2514 
// Spill every register in |set| to the stack. The area is reserved in one
// step, then filled from the top down: GPRs first, then floats stored as
// doubles. PopRegsInMaskIgnore walks the same layout in the same order.
void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
  int32_t diff =
      set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
  const int32_t reserved = diff;

  reserveStack(reserved);
  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diff -= sizeof(intptr_t);
    storePtr(*iter, Address(StackPointer, diff));
  }

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  // reduceSetForPush() collapses aliased float registers so each slot is
  // written exactly once.
  for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
       iter.more(); ++iter) {
    diff -= sizeof(double);
    storeDouble(*iter, Address(StackPointer, diff));
  }
  // All reserved bytes must have been used exactly.
  MOZ_ASSERT(diff == 0);
}
   2537 
// Restore the registers previously saved by PushRegsInMask(set), skipping
// any register present in |ignore| (its stack slot is left unread so the
// register keeps its current value). Traversal order and layout mirror
// PushRegsInMask exactly; the whole area is freed at the end.
void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
                                         LiveRegisterSet ignore) {
  int32_t diff =
      set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
  const int32_t reserved = diff;

  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diff -= sizeof(intptr_t);
    if (!ignore.has(*iter)) {
      loadPtr(Address(StackPointer, diff), *iter);
    }
  }

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
       iter.more(); ++iter) {
    diff -= sizeof(double);
    if (!ignore.has(*iter)) {
      loadDouble(Address(StackPointer, diff), *iter);
    }
  }
  MOZ_ASSERT(diff == 0);
  freeStack(reserved);
}
   2565 
// Store every register in |set| into the caller-provided buffer ending at
// |dest| (dest.offset is the one-past-the-end byte offset and is walked
// downward). Unlike PushRegsInMask this does not touch the stack pointer.
// The scratch Register parameter is unused on this platform.
void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
                                     Register) {
  FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
  mozilla::DebugOnly<unsigned> numFpu = fpuSet.size();
  int32_t diffF = fpuSet.getPushSizeInBytes();
  mozilla::DebugOnly<int32_t> diffG = set.gprs().size() * sizeof(intptr_t);

  // The buffer must be large enough for both register classes.
  MOZ_ASSERT(dest.offset >= diffG + diffF);

  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diffG -= sizeof(intptr_t);
    dest.offset -= sizeof(intptr_t);
    storePtr(*iter, dest);
  }
  MOZ_ASSERT(diffG == 0);

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  // Each float register is stored with its own width (double or single).
  for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
    FloatRegister reg = *iter;
    diffF -= reg.size();
    numFpu -= 1;
    dest.offset -= reg.size();
    if (reg.isDouble()) {
      storeDouble(reg, dest);
    } else if (reg.isSingle()) {
      storeFloat32(reg, dest);
    } else {
      MOZ_CRASH("Unknown register type.");
    }
  }
  MOZ_ASSERT(numFpu == 0);

  // Any residue smaller than a word would indicate a layout mismatch.
  diffF -= diffF % sizeof(uintptr_t);
  MOZ_ASSERT(diffF == 0);
}
   2604 
// Reset the stack pointer to an absolute depth of |framePushed| bytes below
// the frame pointer (SP = FP - framePushed), rather than adjusting SP
// relatively. Only shrinking the frame is allowed.
void MacroAssembler::freeStackTo(uint32_t framePushed) {
  MOZ_ASSERT(framePushed <= framePushed_);
  ma_dsubu(StackPointer, FramePointer, Imm32(framePushed));
  framePushed_ = framePushed;
}
   2610 
   2611 // ===============================================================
   2612 // ABI function calls.
   2613 
// Prepare for an ABI call when the current stack alignment is unknown.
// The old SP is saved on the (newly aligned) stack so callWithABIPost can
// restore it; |scratch| is clobbered.
void MacroAssembler::setupUnalignedABICall(Register scratch) {
  MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
  setupNativeABICall();
  dynamicAlignment_ = true;

  // Keep a copy of the incoming SP.
  ma_move(scratch, StackPointer);

  // Force sp to be aligned: make room for the saved SP word, then round
  // down to the ABI alignment.
  asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
  ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
  // Stash the original SP at the top of the aligned area.
  storePtr(scratch, Address(StackPointer, 0));
}
   2626 
// Finish argument setup for an ABI call: compute and reserve the outgoing
// stack area (including a slot for $ra and alignment padding), save $ra,
// and emit the argument moves. The reserved size is returned through
// |*stackAdjust| for callWithABIPost to undo.
void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
  MOZ_ASSERT(inCall_);
  uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();

  // Reserve place for $ra.
  stackForCall += sizeof(intptr_t);

  if (dynamicAlignment_) {
    // setupUnalignedABICall already aligned SP, so only the call area
    // itself needs padding.
    stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
  } else {
    // Account for what is already on the stack (and the wasm frame header
    // when called from wasm) to reach ABI alignment.
    uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
    stackForCall += ComputeByteAlignment(
        stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
  }

  *stackAdjust = stackForCall;
  reserveStack(stackForCall);

  // Save $ra because call is going to clobber it. Restore it in
  // callWithABIPost. NOTE: This is needed for calls from SharedIC.
  // Maybe we can do this differently.
  storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));

  // Position all arguments.
  {
    enoughMemory_ &= moveResolver_.resolve();
    if (!enoughMemory_) {
      return;
    }

    MoveEmitter emitter(*this);
    emitter.emit(moveResolver_);
    emitter.finish();
  }

  assertStackAlignment(ABIStackAlignment);
}
   2664 
// Undo callWithABIPre: restore $ra from its stack slot and release the
// outgoing argument area (or restore the pre-alignment SP for unaligned
// calls). |result| is unused on this platform.
void MacroAssembler::callWithABIPost(uint32_t stackAdjust, ABIType result) {
  // Restore ra value (as stored in callWithABIPre()).
  loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);

  if (dynamicAlignment_) {
    // Restore sp value from stack (as stored in setupUnalignedABICall()).
    loadPtr(Address(StackPointer, stackAdjust), StackPointer);
    // Use adjustFrame instead of freeStack because we already restored sp.
    adjustFrame(-stackAdjust);
  } else {
    freeStack(stackAdjust);
  }

#ifdef DEBUG
  MOZ_ASSERT(inCall_);
  inCall_ = false;
#endif
}
   2683 
// ABI call through a register-held function pointer.
void MacroAssembler::callWithABINoProfiler(Register fun, ABIType result) {
  // Load the callee in t9, no instruction between the lw and call
  // should clobber it. Note that we can't use fun.base because it may
  // be one of the IntArg registers clobbered before the call.
  ma_move(CallReg, fun);
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);
  call(CallReg);
  callWithABIPost(stackAdjust, result);
}
   2694 
   2695 void MacroAssembler::callWithABINoProfiler(const Address& fun, ABIType result) {
   2696  // Load the callee in t9, as above.
   2697  loadPtr(Address(fun.base, fun.offset), CallReg);
   2698  uint32_t stackAdjust;
   2699  callWithABIPre(&stackAdjust);
   2700  call(CallReg);
   2701  callWithABIPost(stackAdjust, result);
   2702 }
   2703 
   2704 // ===============================================================
   2705 // Move
   2706 
   2707 void MacroAssembler::moveValue(const ValueOperand& src,
   2708                               const ValueOperand& dest) {
   2709  if (src == dest) {
   2710    return;
   2711  }
   2712  movePtr(src.valueReg(), dest.valueReg());
   2713 }
   2714 
// Materialize a constant Value into a value register. GC things need a
// data relocation entry plus a patchable load so the GC can update the
// embedded pointer when the referent moves.
void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
  if (!src.isGCThing()) {
    ma_li(dest.valueReg(), ImmWord(src.asRawBits()));
    return;
  }

  writeDataRelocation(src);
  movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
}
   2724 
   2725 // ===============================================================
   2726 // Branch functions
   2727 
// Branch on whether the Value stored at |address| is a nursery-allocated
// GC cell; forwards to the shared template implementation below.
void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              const Address& address,
                                              Register temp, Label* label) {
  branchValueIsNurseryCellImpl(cond, address, temp, label);
}
   2733 
// Register-operand overload; forwards to the shared template implementation.
void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              ValueOperand value, Register temp,
                                              Label* label) {
  branchValueIsNurseryCellImpl(cond, value, temp, label);
}
   2739 
// Shared implementation: a Value is a nursery cell iff it is a GC thing
// whose chunk has a non-null store buffer pointer.
template <typename T>
void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
                                                  const T& value, Register temp,
                                                  Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
  Label done;
  // Non-GC-things can never be in the nursery: fall through (Equal) or
  // take the label directly (NotEqual).
  branchTestGCThing(Assembler::NotEqual, value,
                    cond == Assembler::Equal ? &done : label);

  // temp may be InvalidReg, use scratch2 instead.
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();

  // Nursery chunks have a store buffer; tenured chunks store null there.
  getGCThingValueChunk(value, scratch2);
  loadPtr(Address(scratch2, gc::ChunkStoreBufferOffset), scratch2);
  branchPtr(InvertCondition(cond), scratch2, ImmWord(0), label);

  bind(&done);
}
   2759 
// Branch if |lhs| compares Equal/NotEqual to the constant Value |rhs|.
// NaN is disallowed because bitwise equality is not value equality for it.
void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
                                     const Value& rhs, Label* label) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  MOZ_ASSERT(!rhs.isNaN());

  if (!rhs.isGCThing()) {
    // Non-GC values can be compared against an immediate of the raw bits.
    ma_b(lhs.valueReg(), ImmWord(rhs.asRawBits()), label, cond);
  } else {
    // GC things must go through moveValue so the pointer gets a data
    // relocation entry.
    UseScratchRegisterScope temps(*this);
    Register scratch = temps.Acquire();
    MOZ_ASSERT(lhs.valueReg() != scratch);
    moveValue(rhs, ValueOperand(scratch));
    ma_b(lhs.valueReg(), scratch, label, cond);
  }
}
   2775 
// Branch if |val| is (Equal) or is not (NotEqual) a boxed NaN double,
// irrespective of the NaN's sign bit. |temp| is clobbered.
void MacroAssembler::branchTestNaNValue(Condition cond, const ValueOperand& val,
                                        Register temp, Label* label) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();
  MOZ_ASSERT(val.valueReg() != scratch);

  // When testing for NaN, we want to ignore the sign bit.
  if (hasR2()) {
    // Clear the sign bit by extracting the lower 63 bits.
    ma_dext(temp, val.valueReg(), Imm32(0), Imm32(63));
  } else {
    // Clear the sign bit by shifting left and then right.
    ma_dsll(temp, val.valueReg(), Imm32(1));
    ma_dsrl(temp, temp, Imm32(1));
  }

  // Compare against a NaN with sign bit 0.
  static_assert(JS::detail::CanonicalizedNaNSignBit == 0);
  moveValue(DoubleValue(JS::GenericNaN()), ValueOperand(scratch));
  ma_b(temp, scratch, label, cond);
}
   2798 
   2799 // ========================================================================
   2800 // Memory access primitives.
// Store an unboxed typed value (constant or register) of type |valueType|
// into |dest| as a boxed Value. |valueType| must be a concrete type, not
// MIRType::Value.
template <typename T>
void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
                                       MIRType valueType, const T& dest) {
  MOZ_ASSERT(valueType < MIRType::Value);

  // Doubles box without a tag rewrite; handle them first.
  if (valueType == MIRType::Double) {
    boxDouble(value.reg().typedReg().fpu(), dest);
    return;
  }

  if (value.constant()) {
    storeValue(value.value(), dest);
  } else {
    // Box the GPR payload with the tag derived from the MIR type.
    storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
               dest);
  }
}
   2818 
// Explicit instantiations for the destination types used by the JIT.
template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
                                                MIRType valueType,
                                                const Address& dest);
template void MacroAssembler::storeUnboxedValue(
    const ConstantOrRegister& value, MIRType valueType,
    const BaseObjectElementIndex& dest);
   2825 
// Push |reg| boxed as a double Value. subFromStackPtr moves SP without
// updating framePushed_, so adjustFrame is called explicitly afterwards.
void MacroAssembler::PushBoxed(FloatRegister reg) {
  subFromStackPtr(Imm32(sizeof(double)));
  boxDouble(reg, Address(getStackPointer(), 0));
  adjustFrame(sizeof(double));
}
   2831 
// 32-bit wasm bounds check against a register-held limit: branch to
// |label| when |cond| holds for (index, limit).
void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
                                       Register boundsCheckLimit,
                                       Label* label) {
  ma_b(index, boundsCheckLimit, label, cond);
}
   2837 
// 32-bit wasm bounds check against an in-memory limit; the limit is loaded
// into a scratch register first.
void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
                                       Address boundsCheckLimit, Label* label) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  load32(boundsCheckLimit, scratch2);
  ma_b(index, scratch2, label, cond);
}
   2845 
// 64-bit wasm bounds check against a register-held limit.
void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
                                       Register64 boundsCheckLimit,
                                       Label* label) {
  ma_b(index.reg, boundsCheckLimit.reg, label, cond);
}
   2851 
// 64-bit wasm bounds check against an in-memory limit; the limit is loaded
// into a scratch register first.
void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
                                       Address boundsCheckLimit, Label* label) {
  UseScratchRegisterScope temps(*this);
  Register scratch2 = temps.Acquire();
  loadPtr(boundsCheckLimit, scratch2);
  ma_b(index.reg, scratch2, label, cond);
}
   2859 
// Sign-extend the 32-bit value in |r| to the full 64-bit register.
void MacroAssembler::widenInt32(Register r) {
  // NOTE(review): may be redundant when int32 values are already kept
  // sign-extended on MIPS64 — original comment hedged the same way.
  move32To64SignExtend(r, Register64(r));
}
   2864 
// Truncate |input| to uint32, branching to |oolEntry| on NaN or when the
// result does not fit in 32 bits. Strategy: truncate to int64, then verify
// the upper 32 bits are zero.
void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
                                                Register output,
                                                bool isSaturating,
                                                Label* oolEntry) {
  UseScratchRegisterScope temps(*this);
  // NaN is unordered with itself; bail out to OOL.
  branchDouble(Assembler::DoubleUnordered, input, input, oolEntry);
  as_truncld(ScratchDoubleReg, input);
  moveFromDouble(ScratchDoubleReg, output);
  Register scratch = temps.Acquire();
  // Any nonzero high word means the value was out of uint32 range.
  ma_dsrl(scratch, output, Imm32(32));
  // sll with shift 0 sign-extends the low 32 bits into the register.
  as_sll(output, output, 0);
  ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
}
   2878 
// float32 -> uint32 truncation; same strategy as the double variant:
// truncate to int64 and require the high 32 bits to be zero.
void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
                                                 Register output,
                                                 bool isSaturating,
                                                 Label* oolEntry) {
  UseScratchRegisterScope temps(*this);
  // NaN check: unordered compare with itself.
  branchFloat(Assembler::DoubleUnordered, input, input, oolEntry);
  as_truncls(ScratchDoubleReg, input);
  moveFromDouble(ScratchDoubleReg, output);
  Register scratch = temps.Acquire();
  ma_dsrl(scratch, output, Imm32(32));
  // sll with shift 0 sign-extends the low 32 bits into the register.
  as_sll(output, output, 0);
  ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
}
   2892 
// Aligned wasm 64-bit load; no temp register is needed.
void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
                                 Register memoryBase, Register ptr,
                                 Register ptrScratch, Register64 output) {
  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
}
   2898 
// Unaligned wasm 64-bit load; |tmp| is required by the unaligned path.
void MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
                                          Register memoryBase, Register ptr,
                                          Register ptrScratch,
                                          Register64 output, Register tmp) {
  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
}
   2905 
// Aligned wasm 64-bit store; no temp register is needed.
void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
                                  Register64 value, Register memoryBase,
                                  Register ptr, Register ptrScratch) {
  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
}
   2911 
// Unaligned wasm 64-bit store; |tmp| is required by the unaligned path.
void MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
                                           Register64 value,
                                           Register memoryBase, Register ptr,
                                           Register ptrScratch, Register tmp) {
  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
}
   2918 
// double -> int64 truncation. The FPU's trunc.l.d sets the Invalid-Op
// (CauseV) bit in FCSR for NaN/out-of-range inputs; that bit is tested to
// decide whether to take the OOL path.
void MacroAssembler::wasmTruncateDoubleToInt64(
    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempDouble) {
  MOZ_ASSERT(tempDouble.isInvalid());
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();

  as_truncld(ScratchDoubleReg, input);
  // Read FCSR to inspect the exception cause bits set by the truncation.
  as_cfc1(scratch, Assembler::FCSR);
  moveFromDouble(ScratchDoubleReg, output.reg);
  // Extract the Invalid-Operation cause bit.
  ma_ext(scratch, scratch, Assembler::CauseV, 1);
  ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);

  if (isSaturating) {
    bind(oolRejoin);
  }
}
   2936 
// double -> uint64 truncation. Values that fit in the signed int64 range
// are handled by a plain trunc.l.d; larger values are rebased by 2^63
// (truncate input - 2^63, then add 2^63 back), with FCSR's Invalid-Op bit
// and a zero-result guard deciding the OOL path.
void MacroAssembler::wasmTruncateDoubleToUInt64(
    FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempDouble) {
  MOZ_ASSERT(tempDouble.isInvalid());
  Register output = output_.reg;

  Label done;

  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();

  // Guard against NaN.
  branchDouble(Assembler::DoubleUnordered, input, input, oolEntry);

  as_truncld(ScratchDoubleReg, input);
  // Build INT64_MAX (0x7fff...ffff) in scratch: all-ones, keep low 63 bits.
  ma_li(scratch, Imm32(-1));
  ma_dext(scratch, scratch, Imm32(0), Imm32(63));
  moveFromDouble(ScratchDoubleReg, output);
  // If the signed truncation result is (unsigned) below INT64_MAX, it is
  // already the correct uint64 result.
  ma_b(output, scratch, &done, Assembler::Below, ShortJump);

  // Rebase path for values >= 2^63.
  loadConstantDouble(double(INT64_MAX + 1ULL), ScratchDoubleReg);
  {
    UseScratchRegisterScope temps(*this);
    Register scratch2 = temps.Acquire();
    // scratch2 = INT64_MAX + 1 = INT64_MIN bit pattern = 2^63.
    ma_daddu(scratch2, scratch, Imm32(1));
    // Truncate (input - 2^63) and add 2^63 back in integer arithmetic.
    as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
    as_truncld(ScratchDoubleReg, ScratchDoubleReg);
    as_cfc1(scratch, Assembler::FCSR);
    moveFromDouble(ScratchDoubleReg, output);
    ma_ext(scratch, scratch, Assembler::CauseV, 1);
    ma_daddu(output, scratch2);

    // Guard against negative values that result in 0 due the precision loss.
    as_sltiu(scratch2, output, 1);
    ma_or(scratch, scratch2);
  }
  // Any failure indicator set above sends us to the OOL path.
  ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);

  bind(&done);

  if (isSaturating) {
    bind(oolRejoin);
  }
}
   2984 
// float32 -> int64 truncation; mirrors the double variant, using FCSR's
// Invalid-Op (CauseV) bit to detect NaN/out-of-range inputs.
void MacroAssembler::wasmTruncateFloat32ToInt64(
    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempFloat) {
  MOZ_ASSERT(tempFloat.isInvalid());
  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();

  as_truncls(ScratchDoubleReg, input);
  // Read FCSR to inspect the exception cause bits set by the truncation.
  as_cfc1(scratch, Assembler::FCSR);
  moveFromDouble(ScratchDoubleReg, output.reg);
  // Extract the Invalid-Operation cause bit.
  ma_ext(scratch, scratch, Assembler::CauseV, 1);
  ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);

  if (isSaturating) {
    bind(oolRejoin);
  }
}
   3002 
// float32 -> uint64 truncation; same rebase-by-2^63 strategy as the double
// variant: small values take the plain signed truncation, large values are
// computed as trunc(input - 2^63) + 2^63 with FCSR/zero-result guards.
void MacroAssembler::wasmTruncateFloat32ToUInt64(
    FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
    Label* oolRejoin, FloatRegister tempFloat) {
  MOZ_ASSERT(tempFloat.isInvalid());
  Register output = output_.reg;

  Label done;

  UseScratchRegisterScope temps(*this);
  Register scratch = temps.Acquire();

  // Guard against NaN.
  branchFloat(Assembler::DoubleUnordered, input, input, oolEntry);

  as_truncls(ScratchDoubleReg, input);
  // Build INT64_MAX (0x7fff...ffff) in scratch: all-ones, keep low 63 bits.
  ma_li(scratch, Imm32(-1));
  ma_dext(scratch, scratch, Imm32(0), Imm32(63));
  moveFromDouble(ScratchDoubleReg, output);
  // If the signed truncation result is (unsigned) below INT64_MAX, it is
  // already the correct uint64 result.
  ma_b(output, scratch, &done, Assembler::Below, ShortJump);

  // Rebase path for values >= 2^63.
  loadConstantFloat32(float(INT64_MAX + 1ULL), ScratchFloat32Reg);
  {
    UseScratchRegisterScope temps(*this);
    Register scratch2 = temps.Acquire();
    // scratch2 = INT64_MAX + 1 = 2^63.
    ma_daddu(scratch2, scratch, Imm32(1));
    // Truncate (input - 2^63) and add 2^63 back in integer arithmetic.
    as_subs(ScratchFloat32Reg, input, ScratchFloat32Reg);
    as_truncls(ScratchDoubleReg, ScratchFloat32Reg);
    as_cfc1(scratch, Assembler::FCSR);
    moveFromDouble(ScratchDoubleReg, output);
    ma_ext(scratch, scratch, Assembler::CauseV, 1);
    ma_daddu(output, scratch2);

    // Guard against negative values that result in 0 due the precision loss.
    as_sltiu(scratch2, output, 1);
    ma_or(scratch, scratch2);
  }
  // Any failure indicator set above sends us to the OOL path.
  ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);

  bind(&done);

  if (isSaturating) {
    bind(oolRejoin);
  }
}
   3050 
// Shared implementation for (un)aligned wasm 64-bit loads from
// memoryBase[ptr + offset]. For the aligned path the load instruction is
// recorded as a potentially faulting instruction for trap handling;
// |tmp| is only needed (and required) for unaligned accesses.
void MacroAssemblerMIPS64Compat::wasmLoadI64Impl(
    const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
    Register ptrScratch, Register64 output, Register tmp) {
  access.assertOffsetInGuardPages();
  uint32_t offset = access.offset32();
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

  // SIMD loads are not supported here.
  MOZ_ASSERT(!access.isZeroExtendSimd128Load());
  MOZ_ASSERT(!access.isSplatSimd128Load());
  MOZ_ASSERT(!access.isWidenSimd128Load());

  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(ImmWord(offset), ptrScratch);
    ptr = ptrScratch;
  }

  unsigned byteSize = access.byteSize();
  bool isSigned = Scalar::isSignedIntType(access.type());

  BaseIndex address(memoryBase, ptr, TimesOne);
  if (IsUnaligned(access)) {
    MOZ_ASSERT(tmp != InvalidReg);
    asMasm().ma_load_unaligned(access, output.reg, address, tmp,
                               static_cast<LoadStoreSize>(8 * byteSize),
                               isSigned ? SignExtend : ZeroExtend);
    return;
  }

  asMasm().memoryBarrierBefore(access.sync());
  // Record the faulting-instruction offset so the wasm trap handler can
  // identify this load.
  FaultingCodeOffset fco = asMasm().ma_load(
      output.reg, address, static_cast<LoadStoreSize>(8 * byteSize),
      isSigned ? SignExtend : ZeroExtend);
  asMasm().append(access,
                  wasm::TrapMachineInsnForLoad(Scalar::byteSize(access.type())),
                  fco);
  asMasm().memoryBarrierAfter(access.sync());
}
   3089 
// Shared implementation for (un)aligned wasm 64-bit stores to
// memoryBase[ptr + offset]; mirrors wasmLoadI64Impl. |tmp| is only needed
// (and required) for unaligned accesses.
void MacroAssemblerMIPS64Compat::wasmStoreI64Impl(
    const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
    Register ptr, Register ptrScratch, Register tmp) {
  access.assertOffsetInGuardPages();
  uint32_t offset = access.offset32();
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(ImmWord(offset), ptrScratch);
    ptr = ptrScratch;
  }

  unsigned byteSize = access.byteSize();
  bool isSigned = Scalar::isSignedIntType(access.type());

  BaseIndex address(memoryBase, ptr, TimesOne);

  if (IsUnaligned(access)) {
    MOZ_ASSERT(tmp != InvalidReg);
    asMasm().ma_store_unaligned(access, value.reg, address, tmp,
                                static_cast<LoadStoreSize>(8 * byteSize),
                                isSigned ? SignExtend : ZeroExtend);
    return;
  }

  asMasm().memoryBarrierBefore(access.sync());
  // Record the faulting-instruction offset so the wasm trap handler can
  // identify this store.
  FaultingCodeOffset fco = asMasm().ma_store(
      value.reg, address, static_cast<LoadStoreSize>(8 * byteSize),
      isSigned ? SignExtend : ZeroExtend);
  asMasm().append(
      access, wasm::TrapMachineInsnForStore(Scalar::byteSize(access.type())),
      fco);
  asMasm().memoryBarrierAfter(access.sync());
}
   3125 
// 64-bit compare-and-exchange via an LL/SC (lld/scd) retry loop:
// load-linked the current value; if it equals |expect|, attempt a
// store-conditional of |replace| and retry on failure. |output| receives
// the value observed by the final lld. Memory barriers implement |sync|.
template <typename T>
static void CompareExchange64(MacroAssembler& masm,
                              const wasm::MemoryAccessDesc* access,
                              Synchronization sync, const T& mem,
                              Register64 expect, Register64 replace,
                              Register64 output) {
  // output doubles as the comparison operand, so it must not alias inputs.
  MOZ_ASSERT(expect != output && replace != output);

  UseScratchRegisterScope temps(masm);
  Register scratch2 = temps.Acquire();

  // lld/scd take a base register; flatten the address first.
  masm.computeEffectiveAddress(mem, scratch2);

  Label tryAgain;
  Label exit;

  masm.memoryBarrierBefore(sync);

  masm.bind(&tryAgain);

  if (access) {
    // The lld below may fault; register it with the wasm trap machinery.
    masm.append(*access, wasm::TrapMachineInsn::Load64,
                FaultingCodeOffset(masm.currentOffset()));
  }

  Register scratch = temps.Acquire();
  masm.as_lld(output.reg, scratch2, 0);
  // Mismatch: leave the loop with the observed value in output.
  masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
  // scd clobbers its value register, so stage the replacement in scratch.
  masm.movePtr(replace.reg, scratch);
  masm.as_scd(scratch, scratch2, 0);
  // scd writes 0 on failure; retry the whole LL/SC sequence.
  masm.ma_b(scratch, scratch, &tryAgain, Assembler::Zero, ShortJump);

  masm.memoryBarrierAfter(sync);

  masm.bind(&exit);
}
   3162 
   3163 void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
   3164                                           const Address& mem,
   3165                                           Register64 expect,
   3166                                           Register64 replace,
   3167                                           Register64 output) {
   3168  CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
   3169                    output);
   3170 }
   3171 
   3172 void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
   3173                                           const BaseIndex& mem,
   3174                                           Register64 expect,
   3175                                           Register64 replace,
   3176                                           Register64 output) {
   3177  CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
   3178                    output);
   3179 }
   3180 
   3181 void MacroAssembler::compareExchange64(Synchronization sync, const Address& mem,
   3182                                       Register64 expect, Register64 replace,
   3183                                       Register64 output) {
   3184  CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
   3185 }
   3186 
   3187 void MacroAssembler::compareExchange64(Synchronization sync,
   3188                                       const BaseIndex& mem, Register64 expect,
   3189                                       Register64 replace, Register64 output) {
   3190  CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
   3191 }
   3192 
// 64-bit atomic exchange emitted as an LLD/SCD retry loop: load the old
// value into `output`, conditionally store `value`, and retry until the
// store succeeds. `access` is non-null for wasm callers, in which case the
// LLD is recorded as a potential trap site.
template <typename T>
static void AtomicExchange64(MacroAssembler& masm,
                            const wasm::MemoryAccessDesc* access,
                            Synchronization sync, const T& mem,
                            Register64 value, Register64 output) {
 // `value` is re-read on every loop iteration, so it must not be clobbered
 // by the load into `output`.
 MOZ_ASSERT(value != output);

 UseScratchRegisterScope temps(masm);
 Register scratch2 = temps.Acquire();

 // Flatten the operand into a single base register for LLD/SCD addressing.
 masm.computeEffectiveAddress(mem, scratch2);

 Label tryAgain;

 masm.memoryBarrierBefore(sync);

 masm.bind(&tryAgain);

 if (access) {
   // The LLD emitted below can fault on an out-of-bounds wasm access;
   // record its code offset for trap handling.
   masm.append(*access, wasm::TrapMachineInsn::Load64,
               FaultingCodeOffset(masm.currentOffset()));
 }

 Register scratch = temps.Acquire();
 masm.as_lld(output.reg, scratch2, 0);
 // SCD clobbers its source register with the success flag, so store via a
 // scratch copy of `value`.
 masm.movePtr(value.reg, scratch);
 masm.as_scd(scratch, scratch2, 0);
 // SCD writes 1 on success, 0 if the LL/SC link was broken; retry on 0.
 masm.ma_b(scratch, scratch, &tryAgain, Assembler::Zero, ShortJump);

 masm.memoryBarrierAfter(sync);
}
   3224 
   3225 template <typename T>
   3226 static void WasmAtomicExchange64(MacroAssembler& masm,
   3227                                 const wasm::MemoryAccessDesc& access,
   3228                                 const T& mem, Register64 value,
   3229                                 Register64 output) {
   3230  AtomicExchange64(masm, &access, access.sync(), mem, value, output);
   3231 }
   3232 
   3233 void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
   3234                                          const Address& mem, Register64 src,
   3235                                          Register64 output) {
   3236  WasmAtomicExchange64(*this, access, mem, src, output);
   3237 }
   3238 
   3239 void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
   3240                                          const BaseIndex& mem, Register64 src,
   3241                                          Register64 output) {
   3242  WasmAtomicExchange64(*this, access, mem, src, output);
   3243 }
   3244 
   3245 void MacroAssembler::atomicExchange64(Synchronization sync, const Address& mem,
   3246                                      Register64 value, Register64 output) {
   3247  AtomicExchange64(*this, nullptr, sync, mem, value, output);
   3248 }
   3249 
   3250 void MacroAssembler::atomicExchange64(Synchronization sync,
   3251                                      const BaseIndex& mem, Register64 value,
   3252                                      Register64 output) {
   3253  AtomicExchange64(*this, nullptr, sync, mem, value, output);
   3254 }
   3255 
// 64-bit atomic read-modify-write emitted as an LLD/SCD retry loop: load
// the old value into `output`, compute `output <op> value` into `temp`,
// and retry until the conditional store of `temp` succeeds. `access` is
// non-null for wasm callers, in which case the LLD is recorded as a
// potential trap site. Callers that discard the result may pass the same
// register for `temp` and `output`.
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
                           const wasm::MemoryAccessDesc* access,
                           Synchronization sync, AtomicOp op, Register64 value,
                           const T& mem, Register64 temp, Register64 output) {
 // `value` is re-read on every iteration and must survive both the load
 // and the op.
 MOZ_ASSERT(value != output);
 MOZ_ASSERT(value != temp);

 UseScratchRegisterScope temps(masm);
 Register scratch2 = temps.Acquire();

 // Flatten the operand into a single base register for LLD/SCD addressing.
 masm.computeEffectiveAddress(mem, scratch2);

 Label tryAgain;

 masm.memoryBarrierBefore(sync);

 masm.bind(&tryAgain);
 if (access) {
   // The LLD emitted below can fault on an out-of-bounds wasm access;
   // record its code offset for trap handling.
   masm.append(*access, wasm::TrapMachineInsn::Load64,
               FaultingCodeOffset(masm.currentOffset()));
 }

 masm.as_lld(output.reg, scratch2, 0);

 // temp = output <op> value.
 switch (op) {
   case AtomicOp::Add:
     masm.as_daddu(temp.reg, output.reg, value.reg);
     break;
   case AtomicOp::Sub:
     masm.as_dsubu(temp.reg, output.reg, value.reg);
     break;
   case AtomicOp::And:
     masm.as_and(temp.reg, output.reg, value.reg);
     break;
   case AtomicOp::Or:
     masm.as_or(temp.reg, output.reg, value.reg);
     break;
   case AtomicOp::Xor:
     masm.as_xor(temp.reg, output.reg, value.reg);
     break;
   default:
     MOZ_CRASH();
 }

 // SCD writes 1 to temp.reg on success, 0 if the LL/SC link was broken;
 // retry on 0.
 masm.as_scd(temp.reg, scratch2, 0);
 masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::Zero, ShortJump);

 masm.memoryBarrierAfter(sync);
}
   3306 
   3307 void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
   3308                                         AtomicOp op, Register64 value,
   3309                                         const Address& mem, Register64 temp,
   3310                                         Register64 output) {
   3311  AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
   3312 }
   3313 
   3314 void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
   3315                                         AtomicOp op, Register64 value,
   3316                                         const BaseIndex& mem, Register64 temp,
   3317                                         Register64 output) {
   3318  AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
   3319 }
   3320 
   3321 void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
   3322                                     Register64 value, const Address& mem,
   3323                                     Register64 temp, Register64 output) {
   3324  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
   3325 }
   3326 
   3327 void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
   3328                                     Register64 value, const BaseIndex& mem,
   3329                                     Register64 temp, Register64 output) {
   3330  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
   3331 }
   3332 
   3333 void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
   3334                                      Register64 value, const Address& mem,
   3335                                      Register64 temp) {
   3336  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
   3337 }
   3338 
   3339 void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
   3340                                      Register64 value, const BaseIndex& mem,
   3341                                      Register64 temp) {
   3342  AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
   3343 }
   3344 
   3345 // ========================================================================
   3346 // Convert floating point.
   3347 
// Convert a signed 64-bit integer to double: move the GPR into the FPU
// register, then convert longword-to-double in place.
void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
 as_dmtc1(src.reg, dest);
 as_cvtdl(dest, dest);
}
   3352 
// Convert a signed 64-bit integer to float32: move the GPR into the FPU
// register, then convert longword-to-single in place.
void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
 as_dmtc1(src.reg, dest);
 as_cvtsl(dest, dest);
}
   3357 
   3358 bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
   3359 
// Convert an unsigned 64-bit integer to double. Since
// convertUInt64ToDoubleNeedsTemp() returns false on this platform, callers
// must pass an invalid temp register; the work is delegated to the
// platform-specific helper.
void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
                                          Register temp) {
 MOZ_ASSERT(temp == Register::Invalid());
 MacroAssemblerSpecific::convertUInt64ToDouble(src.reg, dest);
}
   3365 
// Convert an unsigned 64-bit integer to float32. The FPU only has a signed
// 64-bit convert, so values with the top bit set are halved first (with the
// shifted-out bit ORed back into bit 0, i.e. round-to-odd, so the final
// doubling cannot introduce a second rounding error), converted, and then
// doubled.
void MacroAssembler::convertUInt64ToFloat32(Register64 src_, FloatRegister dest,
                                           Register temp) {
 // No integer temp register is needed on this platform.
 MOZ_ASSERT(temp == Register::Invalid());

 UseScratchRegisterScope temps(*this);
 Register scratch = temps.Acquire();
 Register scratch2 = temps.Acquire();

 Register src = src_.reg;
 Label positive, done;
 // Top bit clear: the value fits in a signed 64-bit convert.
 ma_b(src, src, &positive, NotSigned, ShortJump);

 MOZ_ASSERT(src != scratch);
 MOZ_ASSERT(src != scratch2);

 // Top bit set: scratch = (src >> 1) | (src & 1), convert, then double.
 ma_and(scratch, src, Imm32(1));
 ma_dsrl(scratch2, src, Imm32(1));
 ma_or(scratch, scratch2);
 as_dmtc1(scratch, dest);
 as_cvtsl(dest, dest);
 addFloat32(dest, dest);
 ma_b(&done, ShortJump);

 bind(&positive);
 // Value is representable as signed: convert directly.
 as_dmtc1(src, dest);
 as_cvtsl(dest, dest);

 bind(&done);
}
   3395 
   3396 void MacroAssembler::flexibleQuotientPtr(
   3397    Register lhs, Register rhs, Register dest, bool isUnsigned,
   3398    const LiveRegisterSet& volatileLiveRegs) {
   3399  quotient64(lhs, rhs, dest, isUnsigned);
   3400 }
   3401 
   3402 void MacroAssembler::flexibleRemainderPtr(
   3403    Register lhs, Register rhs, Register dest, bool isUnsigned,
   3404    const LiveRegisterSet& volatileLiveRegs) {
   3405  remainder64(lhs, rhs, dest, isUnsigned);
   3406 }
   3407 
   3408 void MacroAssembler::wasmMarkCallAsSlow() { mov(ra, ra); }
   3409 
   3410 const int32_t SlowCallMarker = 0x37ff0000;  // ori ra, ra, 0
   3411 
// Branch to `notSlow` unless the instruction at the return address `ra_` is
// the SlowCallMarker emitted by wasmMarkCallAsSlow. `temp2` is clobbered
// with the loaded instruction word; `temp1` is unused here — presumably
// kept for signature parity with other backends (confirm).
void MacroAssembler::wasmCheckSlowCallsite(Register ra_, Label* notSlow,
                                          Register temp1, Register temp2) {
 MOZ_ASSERT(ra_ != temp2);
 load32(Address(ra_, 0), temp2);
 branch32(Assembler::NotEqual, temp2, Imm32(SlowCallMarker), notSlow);
}
   3418 
   3419 CodeOffset MacroAssembler::wasmMarkedSlowCall(const wasm::CallSiteDesc& desc,
   3420                                              const Register reg) {
   3421  CodeOffset offset = call(desc, reg);
   3422  wasmMarkCallAsSlow();
   3423  return offset;
   3424 }
   3425 
   3426 //}}} check_macroassembler_style