tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MacroAssembler-arm.h (57693B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef jit_arm_MacroAssembler_arm_h
      8 #define jit_arm_MacroAssembler_arm_h
      9 
     10 #include "mozilla/DebugOnly.h"
     11 
     12 #include "jit/arm/Assembler-arm.h"
     13 #include "jit/MoveResolver.h"
     14 #include "vm/BytecodeUtil.h"
     15 #include "wasm/WasmBuiltins.h"
     16 #include "wasm/WasmCodegenTypes.h"
     17 
     18 using js::wasm::FaultingCodeOffsetPair;
     19 
     20 namespace js {
     21 namespace jit {
     22 
     23 static Register CallReg = ip;
     24 static const int defaultShift = 3;
     25 static_assert(1 << defaultShift == sizeof(JS::Value));
     26 
     27 // See documentation for ScratchTagScope and ScratchTagScopeRelease in
     28 // MacroAssembler-x64.h.
     29 
     30 class ScratchTagScope {
     31  const ValueOperand& v_;
     32 
     33 public:
     34  ScratchTagScope(MacroAssembler&, const ValueOperand& v) : v_(v) {}
     35  operator Register() { return v_.typeReg(); }
     36  void release() {}
     37  void reacquire() {}
     38 };
     39 
     40 class ScratchTagScopeRelease {
     41 public:
     42  explicit ScratchTagScopeRelease(ScratchTagScope*) {}
     43 };
     44 
// MacroAssemblerARM inherits from Assembler, which is defined in
// Assembler-arm.{h,cpp}.
     47 class MacroAssemblerARM : public Assembler {
     48 private:
     49  // Perform a downcast. Should be removed by Bug 996602.
     50  MacroAssembler& asMasm();
     51  const MacroAssembler& asMasm() const;
     52 
     53 protected:
     54  // On ARM, some instructions require a second scratch register. This
     55  // register defaults to lr, since it's non-allocatable (as it can be
     56  // clobbered by some instructions). Allow the baseline compiler to override
     57  // this though, since baseline IC stubs rely on lr holding the return
     58  // address.
     59  Register secondScratchReg_;
     60 
     61 public:
     62  Register getSecondScratchReg() const { return secondScratchReg_; }
     63 
     64 public:
     65  // Higher level tag testing code.
     66  // TODO: Can probably remove the Operand versions.
     67  Operand ToPayload(Operand base) const {
     68    return Operand(Register::FromCode(base.base()), base.disp());
     69  }
     70  Address ToPayload(const Address& base) const { return base; }
     71  BaseIndex ToPayload(const BaseIndex& base) const { return base; }
     72 
     73 protected:
     74  Operand ToType(Operand base) const {
     75    return Operand(Register::FromCode(base.base()),
     76                   base.disp() + sizeof(void*));
     77  }
     78  Address ToType(const Address& base) const {
     79    return ToType(Operand(base)).toAddress();
     80  }
     81  BaseIndex ToType(const BaseIndex& base) const {
     82    return BaseIndex(base.base, base.index, base.scale,
     83                     base.offset + sizeof(void*));
     84  }
     85 
     86  Address ToPayloadAfterStackPush(const Address& base) const {
     87    // If we are based on StackPointer, pass over the type tag just pushed.
     88    if (base.base == StackPointer) {
     89      return Address(base.base, base.offset + sizeof(void*));
     90    }
     91    return ToPayload(base);
     92  }
     93 
     94 public:
     95  MacroAssemblerARM() : secondScratchReg_(lr) {}
     96 
     97  void setSecondScratchReg(Register reg) {
     98    MOZ_ASSERT(reg != ScratchRegister);
     99    secondScratchReg_ = reg;
    100  }
    101 
    102  void convertBoolToInt32(Register source, Register dest);
    103  void convertInt32ToDouble(Register src, FloatRegister dest);
    104  void convertInt32ToDouble(const Address& src, FloatRegister dest);
    105  void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
    106  void convertUInt32ToFloat32(Register src, FloatRegister dest);
    107  void convertUInt32ToDouble(Register src, FloatRegister dest);
    108  void convertDoubleToFloat32(FloatRegister src, FloatRegister dest,
    109                              Condition c = Always);
    110  void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
    111                            bool negativeZeroCheck = true);
    112  void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
    113                          bool negativeZeroCheck = true) {
    114    convertDoubleToInt32(src, dest, fail, negativeZeroCheck);
    115  }
    116  void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
    117                             bool negativeZeroCheck = true);
    118 
    119  void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
    120  void convertInt32ToFloat32(Register src, FloatRegister dest);
    121  void convertInt32ToFloat32(const Address& src, FloatRegister dest);
    122 
    123  void convertDoubleToFloat16(FloatRegister src, FloatRegister dest) {
    124    MOZ_CRASH("Not supported for this target");
    125  }
    126  void convertFloat16ToDouble(FloatRegister src, FloatRegister dest) {
    127    MOZ_CRASH("Not supported for this target");
    128  }
    129  void convertFloat32ToFloat16(FloatRegister src, FloatRegister dest);
    130  void convertFloat16ToFloat32(FloatRegister src, FloatRegister dest);
    131  void convertInt32ToFloat16(Register src, FloatRegister dest);
    132 
    133  void wasmTruncateToInt32(FloatRegister input, Register output,
    134                           MIRType fromType, bool isUnsigned, bool isSaturating,
    135                           Label* oolEntry);
    136  void outOfLineWasmTruncateToIntCheck(FloatRegister input, MIRType fromType,
    137                                       MIRType toType, TruncFlags flags,
    138                                       Label* rejoin,
    139                                       const wasm::TrapSiteDesc& trapSiteDesc);
    140 
    141  // Somewhat direct wrappers for the low-level assembler funcitons
    142  // bitops. Attempt to encode a virtual alu instruction using two real
    143  // instructions.
    144 private:
    145  bool alu_dbl(Register src1, Imm32 imm, Register dest, ALUOp op, SBit s,
    146               Condition c);
    147 
    148 public:
    149  void ma_alu(Register src1, Imm32 imm, Register dest,
    150              AutoRegisterScope& scratch, ALUOp op, SBit s = LeaveCC,
    151              Condition c = Always);
    152  void ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op,
    153              SBit s = LeaveCC, Condition c = Always);
    154  void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
    155              SBit s = LeaveCC, Condition c = Always);
    156  void ma_nop();
    157 
    158  BufferOffset ma_movPatchable(Imm32 imm, Register dest,
    159                               Assembler::Condition c);
    160  BufferOffset ma_movPatchable(ImmPtr imm, Register dest,
    161                               Assembler::Condition c);
    162 
    163  // To be used with Iter := InstructionIterator or BufferInstructionIterator.
    164  template <class Iter>
    165  static void ma_mov_patch(Imm32 imm, Register dest, Assembler::Condition c,
    166                           RelocStyle rs, Iter iter);
    167 
    168  // ALU based ops
    169  // mov
    170  void ma_mov(Register src, Register dest, SBit s = LeaveCC,
    171              Condition c = Always);
    172 
    173  void ma_mov(Imm32 imm, Register dest, Condition c = Always);
    174  void ma_mov(ImmWord imm, Register dest, Condition c = Always);
    175 
    176  void ma_mov(ImmGCPtr ptr, Register dest);
    177 
    178  // Shifts (just a move with a shifting op2)
    179  void ma_lsl(Imm32 shift, Register src, Register dst);
    180  void ma_lsr(Imm32 shift, Register src, Register dst);
    181  void ma_asr(Imm32 shift, Register src, Register dst);
    182  void ma_ror(Imm32 shift, Register src, Register dst);
    183  void ma_rol(Imm32 shift, Register src, Register dst);
    184 
    185  void ma_lsl(Register shift, Register src, Register dst);
    186  void ma_lsr(Register shift, Register src, Register dst);
    187  void ma_asr(Register shift, Register src, Register dst);
    188  void ma_ror(Register shift, Register src, Register dst);
    189  void ma_rol(Register shift, Register src, Register dst,
    190              AutoRegisterScope& scratch);
    191 
    192  // Move not (dest <- ~src)
    193  void ma_mvn(Register src1, Register dest, SBit s = LeaveCC,
    194              Condition c = Always);
    195 
    196  // Negate (dest <- -src) implemented as rsb dest, src, 0
    197  void ma_neg(Register src, Register dest, SBit s = LeaveCC,
    198              Condition c = Always);
    199 
    200  void ma_neg(Register64 src, Register64 dest);
    201 
    202  // And
    203  void ma_and(Register src, Register dest, SBit s = LeaveCC,
    204              Condition c = Always);
    205 
    206  void ma_and(Register src1, Register src2, Register dest, SBit s = LeaveCC,
    207              Condition c = Always);
    208 
    209  void ma_and(Imm32 imm, Register dest, AutoRegisterScope& scratch,
    210              SBit s = LeaveCC, Condition c = Always);
    211 
    212  void ma_and(Imm32 imm, Register src1, Register dest,
    213              AutoRegisterScope& scratch, SBit s = LeaveCC,
    214              Condition c = Always);
    215 
    216  // Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
    217  void ma_bic(Imm32 imm, Register dest, AutoRegisterScope& scratch,
    218              SBit s = LeaveCC, Condition c = Always);
    219 
    220  // Exclusive or
    221  void ma_eor(Register src, Register dest, SBit s = LeaveCC,
    222              Condition c = Always);
    223 
    224  void ma_eor(Register src1, Register src2, Register dest, SBit s = LeaveCC,
    225              Condition c = Always);
    226 
    227  void ma_eor(Imm32 imm, Register dest, AutoRegisterScope& scratch,
    228              SBit s = LeaveCC, Condition c = Always);
    229 
    230  void ma_eor(Imm32 imm, Register src1, Register dest,
    231              AutoRegisterScope& scratch, SBit s = LeaveCC,
    232              Condition c = Always);
    233 
    234  // Or
    235  void ma_orr(Register src, Register dest, SBit s = LeaveCC,
    236              Condition c = Always);
    237 
    238  void ma_orr(Register src1, Register src2, Register dest, SBit s = LeaveCC,
    239              Condition c = Always);
    240 
    241  void ma_orr(Imm32 imm, Register dest, AutoRegisterScope& scratch,
    242              SBit s = LeaveCC, Condition c = Always);
    243 
    244  void ma_orr(Imm32 imm, Register src1, Register dest,
    245              AutoRegisterScope& scratch, SBit s = LeaveCC,
    246              Condition c = Always);
    247 
    248  // Arithmetic based ops.
    249  // Add with carry:
    250  void ma_adc(Imm32 imm, Register dest, AutoRegisterScope& scratch,
    251              SBit s = LeaveCC, Condition c = Always);
    252  void ma_adc(Register src, Register dest, SBit s = LeaveCC,
    253              Condition c = Always);
    254  void ma_adc(Register src1, Register src2, Register dest, SBit s = LeaveCC,
    255              Condition c = Always);
    256  void ma_adc(Register src1, Imm32 op, Register dest,
    257              AutoRegisterScope& scratch, SBit s = LeaveCC,
    258              Condition c = Always);
    259 
    260  // Add:
    261  void ma_add(Imm32 imm, Register dest, AutoRegisterScope& scratch,
    262              SBit s = LeaveCC, Condition c = Always);
    263  void ma_add(Register src1, Register dest, SBit s = LeaveCC,
    264              Condition c = Always);
    265  void ma_add(Register src1, Register src2, Register dest, SBit s = LeaveCC,
    266              Condition c = Always);
    267  void ma_add(Register src1, Operand op, Register dest, SBit s = LeaveCC,
    268              Condition c = Always);
    269  void ma_add(Register src1, Imm32 op, Register dest,
    270              AutoRegisterScope& scratch, SBit s = LeaveCC,
    271              Condition c = Always);
    272 
    273  // Subtract with carry:
    274  void ma_sbc(Imm32 imm, Register dest, AutoRegisterScope& scratch,
    275              SBit s = LeaveCC, Condition c = Always);
    276  void ma_sbc(Register src1, Register dest, SBit s = LeaveCC,
    277              Condition c = Always);
    278  void ma_sbc(Register src1, Register src2, Register dest, SBit s = LeaveCC,
    279              Condition c = Always);
    280 
    281  // Subtract:
    282  void ma_sub(Imm32 imm, Register dest, AutoRegisterScope& scratch,
    283              SBit s = LeaveCC, Condition c = Always);
    284  void ma_sub(Register src1, Register dest, SBit s = LeaveCC,
    285              Condition c = Always);
    286  void ma_sub(Register src1, Register src2, Register dest, SBit s = LeaveCC,
    287              Condition c = Always);
    288  void ma_sub(Register src1, Operand op, Register dest, SBit s = LeaveCC,
    289              Condition c = Always);
    290  void ma_sub(Register src1, Imm32 op, Register dest,
    291              AutoRegisterScope& scratch, SBit s = LeaveCC,
    292              Condition c = Always);
    293 
    294  // Reverse subtract:
    295  void ma_rsb(Imm32 imm, Register dest, AutoRegisterScope& scratch,
    296              SBit s = LeaveCC, Condition c = Always);
    297  void ma_rsb(Register src1, Register dest, SBit s = LeaveCC,
    298              Condition c = Always);
    299  void ma_rsb(Register src1, Register src2, Register dest, SBit s = LeaveCC,
    300              Condition c = Always);
    301  void ma_rsb(Register src1, Imm32 op2, Register dest,
    302              AutoRegisterScope& scratch, SBit s = LeaveCC,
    303              Condition c = Always);
    304 
    305  // Reverse subtract with carry:
    306  void ma_rsc(Imm32 imm, Register dest, AutoRegisterScope& scratch,
    307              SBit s = LeaveCC, Condition c = Always);
    308  void ma_rsc(Register src1, Register dest, SBit s = LeaveCC,
    309              Condition c = Always);
    310  void ma_rsc(Register src1, Register src2, Register dest, SBit s = LeaveCC,
    311              Condition c = Always);
    312 
    313  // Compares/tests.
    314  // Compare negative (sets condition codes as src1 + src2 would):
    315  void ma_cmn(Register src1, Imm32 imm, AutoRegisterScope& scratch,
    316              Condition c = Always);
    317  void ma_cmn(Register src1, Register src2, Condition c = Always);
    318  void ma_cmn(Register src1, Operand op, Condition c = Always);
    319 
    320  // Compare (src - src2):
    321  void ma_cmp(Register src1, Imm32 imm, AutoRegisterScope& scratch,
    322              Condition c = Always);
    323  void ma_cmp(Register src1, ImmTag tag, Condition c = Always);
    324  void ma_cmp(Register src1, ImmWord ptr, AutoRegisterScope& scratch,
    325              Condition c = Always);
    326  void ma_cmp(Register src1, ImmGCPtr ptr, AutoRegisterScope& scratch,
    327              Condition c = Always);
    328  void ma_cmp(Register src1, Operand op, AutoRegisterScope& scratch,
    329              AutoRegisterScope& scratch2, Condition c = Always);
    330  void ma_cmp(Register src1, Register src2, Condition c = Always);
    331 
    332  // Test for equality, (src1 ^ src2):
    333  void ma_teq(Register src1, Imm32 imm, AutoRegisterScope& scratch,
    334              Condition c = Always);
    335  void ma_teq(Register src1, Register src2, Condition c = Always);
    336  void ma_teq(Register src1, Operand op, Condition c = Always);
    337 
    338  // Test (src1 & src2):
    339  void ma_tst(Register src1, Imm32 imm, AutoRegisterScope& scratch,
    340              Condition c = Always);
    341  void ma_tst(Register src1, Register src2, Condition c = Always);
    342  void ma_tst(Register src1, Operand op, Condition c = Always);
    343 
    344  // Multiplies. For now, there are only two that we care about.
    345  void ma_mul(Register src1, Register src2, Register dest);
    346  void ma_mul(Register src1, Imm32 imm, Register dest,
    347              AutoRegisterScope& scratch);
    348  Condition ma_check_mul(Register src1, Register src2, Register dest,
    349                         AutoRegisterScope& scratch, Condition cond);
    350  Condition ma_check_mul(Register src1, Imm32 imm, Register dest,
    351                         AutoRegisterScope& scratch, Condition cond);
    352 
    353  void ma_umull(Register src1, Imm32 imm, Register destHigh, Register destLow,
    354                AutoRegisterScope& scratch);
    355  void ma_umull(Register src1, Register src2, Register destHigh,
    356                Register destLow);
    357 
    358  // Fast mod, uses scratch registers, and thus needs to be in the assembler
    359  // implicitly assumes that we can overwrite dest at the beginning of the
    360  // sequence.
    361  void ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
    362                   AutoRegisterScope& scratch, AutoRegisterScope& scratch2,
    363                   int32_t shift);
    364 
    365  // Mod - depends on integer divide instructions being supported.
    366  void ma_smod(Register num, Register div, Register dest,
    367               AutoRegisterScope& scratch);
    368  void ma_umod(Register num, Register div, Register dest,
    369               AutoRegisterScope& scratch);
    370 
    371  // Division - depends on integer divide instructions being supported.
    372  void ma_sdiv(Register num, Register div, Register dest,
    373               Condition cond = Always);
    374  void ma_udiv(Register num, Register div, Register dest,
    375               Condition cond = Always);
    376  // Misc operations
    377  void ma_clz(Register src, Register dest, Condition cond = Always);
    378  void ma_ctz(Register src, Register dest, AutoRegisterScope& scratch);
    379  // Memory:
    380  // Shortcut for when we know we're transferring 32 bits of data.
    381  void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
    382              AutoRegisterScope& scratch, Index mode = Offset,
    383              Condition cc = Always);
    384  FaultingCodeOffset ma_dtr(LoadStore ls, Register rt, const Address& addr,
    385                            AutoRegisterScope& scratch, Index mode,
    386                            Condition cc);
    387 
    388  FaultingCodeOffset ma_str(Register rt, DTRAddr addr, Index mode = Offset,
    389                            Condition cc = Always);
    390  FaultingCodeOffset ma_str(Register rt, const Address& addr,
    391                            AutoRegisterScope& scratch, Index mode = Offset,
    392                            Condition cc = Always);
    393 
    394  FaultingCodeOffset ma_ldr(DTRAddr addr, Register rt, Index mode = Offset,
    395                            Condition cc = Always);
    396  FaultingCodeOffset ma_ldr(const Address& addr, Register rt,
    397                            AutoRegisterScope& scratch, Index mode = Offset,
    398                            Condition cc = Always);
    399 
    400  FaultingCodeOffset ma_ldrb(DTRAddr addr, Register rt, Index mode = Offset,
    401                             Condition cc = Always);
    402  FaultingCodeOffset ma_ldrh(EDtrAddr addr, Register rt, Index mode = Offset,
    403                             Condition cc = Always);
    404  FaultingCodeOffset ma_ldrsh(EDtrAddr addr, Register rt, Index mode = Offset,
    405                              Condition cc = Always);
    406  FaultingCodeOffset ma_ldrsb(EDtrAddr addr, Register rt, Index mode = Offset,
    407                              Condition cc = Always);
    408  void ma_ldrd(EDtrAddr addr, Register rt, mozilla::DebugOnly<Register> rt2,
    409               Index mode = Offset, Condition cc = Always);
    410  FaultingCodeOffset ma_strb(Register rt, DTRAddr addr, Index mode = Offset,
    411                             Condition cc = Always);
    412  FaultingCodeOffset ma_strh(Register rt, EDtrAddr addr, Index mode = Offset,
    413                             Condition cc = Always);
    414  void ma_strd(Register rt, mozilla::DebugOnly<Register> rt2, EDtrAddr addr,
    415               Index mode = Offset, Condition cc = Always);
    416 
    417  // Specialty for moving N bits of data, where n == 8,16,32,64.
    418  BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
    419                                Register rn, Register rm, Register rt,
    420                                AutoRegisterScope& scratch, Index mode = Offset,
    421                                Condition cc = Always, Scale scale = TimesOne);
    422 
    423  BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
    424                                Register rn, Register rm, Register rt,
    425                                Index mode = Offset, Condition cc = Always);
    426 
    427  BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
    428                                Register rn, Imm32 offset, Register rt,
    429                                AutoRegisterScope& scratch, Index mode = Offset,
    430                                Condition cc = Always);
    431 
    432  BufferOffset ma_pop(Register r);
    433  void ma_popn_pc(Imm32 n, AutoRegisterScope& scratch,
    434                  AutoRegisterScope& scratch2);
    435  void ma_push(Register r);
    436  void ma_push_sp(Register r, AutoRegisterScope& scratch);
    437 
    438  void ma_vpop(VFPRegister r);
    439  void ma_vpush(VFPRegister r);
    440 
    441  // Barriers.
    442  void ma_dmb(BarrierOption option = BarrierSY);
    443  void ma_dsb(BarrierOption option = BarrierSY);
    444 
    445  // Branches when done from within arm-specific code.
    446  BufferOffset ma_b(Label* dest, Condition c = Always);
    447  void ma_b(void* target, Condition c = Always);
    448  void ma_bx(Register dest, Condition c = Always);
    449 
    450  // This is almost NEVER necessary, we'll basically never be calling a label
    451  // except, possibly in the crazy bailout-table case.
    452  void ma_bl(Label* dest, Condition c = Always);
    453 
    454  void ma_blx(Register dest, Condition c = Always);
    455 
    456  // VFP/ALU:
    457  void ma_vadd(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    458  void ma_vsub(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    459 
    460  void ma_vmul(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    461  void ma_vdiv(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    462 
    463  void ma_vneg(FloatRegister src, FloatRegister dest, Condition cc = Always);
    464  void ma_vmov(FloatRegister src, FloatRegister dest, Condition cc = Always);
    465  void ma_vmov_f32(FloatRegister src, FloatRegister dest,
    466                   Condition cc = Always);
    467  void ma_vabs(FloatRegister src, FloatRegister dest, Condition cc = Always);
    468  void ma_vabs_f32(FloatRegister src, FloatRegister dest,
    469                   Condition cc = Always);
    470 
    471  void ma_vsqrt(FloatRegister src, FloatRegister dest, Condition cc = Always);
    472  void ma_vsqrt_f32(FloatRegister src, FloatRegister dest,
    473                    Condition cc = Always);
    474 
    475  void ma_vimm(double value, FloatRegister dest, Condition cc = Always);
    476  void ma_vimm_f32(float value, FloatRegister dest, Condition cc = Always);
    477 
    478  void ma_vcmp(FloatRegister src1, FloatRegister src2, Condition cc = Always);
    479  void ma_vcmp_f32(FloatRegister src1, FloatRegister src2,
    480                   Condition cc = Always);
    481  void ma_vcmpz(FloatRegister src1, Condition cc = Always);
    482  void ma_vcmpz_f32(FloatRegister src1, Condition cc = Always);
    483 
    484  void ma_vadd_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    485  void ma_vsub_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    486 
    487  void ma_vmul_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    488  void ma_vdiv_f32(FloatRegister src1, FloatRegister src2, FloatRegister dst);
    489 
    490  void ma_vneg_f32(FloatRegister src, FloatRegister dest,
    491                   Condition cc = Always);
    492 
    493  // Source is F64, dest is I32:
    494  void ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest,
    495                       Condition cc = Always);
    496  void ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest,
    497                       Condition cc = Always);
    498 
    499  // Source is I32, dest is F64:
    500  void ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest,
    501                       Condition cc = Always);
    502  void ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest,
    503                       Condition cc = Always);
    504 
    505  // Source is F32, dest is I32:
    506  void ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest,
    507                       Condition cc = Always);
    508  void ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest,
    509                       Condition cc = Always);
    510 
    511  // Source is I32, dest is F32:
    512  void ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest,
    513                       Condition cc = Always);
    514  void ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest,
    515                       Condition cc = Always);
    516 
    517  // Transfer (do not coerce) a float into a gpr.
    518  void ma_vxfer(VFPRegister src, Register dest, Condition cc = Always);
    519  // Transfer (do not coerce) a double into a couple of gpr.
    520  void ma_vxfer(VFPRegister src, Register dest1, Register dest2,
    521                Condition cc = Always);
    522 
    523  // Transfer (do not coerce) a gpr into a float
    524  void ma_vxfer(Register src, FloatRegister dest, Condition cc = Always);
    525  // Transfer (do not coerce) a couple of gpr into a double
    526  void ma_vxfer(Register src1, Register src2, FloatRegister dest,
    527                Condition cc = Always);
    528 
    529  BufferOffset ma_vdtr(LoadStore ls, const Address& addr, VFPRegister dest,
    530                       AutoRegisterScope& scratch, Condition cc = Always);
    531 
    532  BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
    533  BufferOffset ma_vldr(const Address& addr, VFPRegister dest,
    534                       AutoRegisterScope& scratch, Condition cc = Always);
    535  BufferOffset ma_vldr(VFPRegister src, Register base, Register index,
    536                       AutoRegisterScope& scratch, int32_t shift = defaultShift,
    537                       Condition cc = Always);
    538 
    539  BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
    540  BufferOffset ma_vstr(VFPRegister src, const Address& addr,
    541                       AutoRegisterScope& scratch, Condition cc = Always);
    542  BufferOffset ma_vstr(VFPRegister src, Register base, Register index,
    543                       AutoRegisterScope& scratch, AutoRegisterScope& scratch2,
    544                       int32_t shift, int32_t offset, Condition cc = Always);
    545  BufferOffset ma_vstr(VFPRegister src, Register base, Register index,
    546                       AutoRegisterScope& scratch, int32_t shift,
    547                       Condition cc = Always);
    548 
    549  void ma_call(ImmPtr dest);
    550 
    551  // Float registers can only be loaded/stored in continuous runs when using
    552  // vstm/vldm. This function breaks set into continuous runs and loads/stores
    553  // them at [rm]. rm will be modified and left in a state logically suitable
    554  // for the next load/store. Returns the offset from [dm] for the logical
    555  // next load/store.
    556  int32_t transferMultipleByRuns(FloatRegisterSet set, LoadStore ls,
    557                                 Register rm, DTMMode mode) {
    558    if (mode == IA) {
    559      return transferMultipleByRunsImpl<FloatRegisterForwardIterator>(
    560          set, ls, rm, mode, 1);
    561    }
    562    if (mode == DB) {
    563      return transferMultipleByRunsImpl<FloatRegisterBackwardIterator>(
    564          set, ls, rm, mode, -1);
    565    }
    566    MOZ_CRASH("Invalid data transfer addressing mode");
    567  }
    568 
    569  // `outAny` is valid if and only if `out64` == Register64::Invalid().
    570  void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
    571                    Register ptr, Register ptrScratch, AnyRegister outAny,
    572                    Register64 out64);
    573 
    574  // `valAny` is valid if and only if `val64` == Register64::Invalid().
    575  void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister valAny,
    576                     Register64 val64, Register memoryBase, Register ptr,
    577                     Register ptrScratch);
    578 
    579 private:
    580  // Implementation for transferMultipleByRuns so we can use different
    581  // iterators for forward/backward traversals. The sign argument should be 1
    582  // if we traverse forwards, -1 if we traverse backwards.
    583  template <typename RegisterIterator>
    584  int32_t transferMultipleByRunsImpl(FloatRegisterSet set, LoadStore ls,
    585                                     Register rm, DTMMode mode, int32_t sign) {
    586    MOZ_ASSERT(sign == 1 || sign == -1);
    587 
    588    int32_t delta = sign * sizeof(float);
    589    int32_t offset = 0;
    590    // Build up a new set, which is the sum of all of the single and double
    591    // registers. This set can have up to 48 registers in it total
    592    // s0-s31 and d16-d31
    593    FloatRegisterSet mod = set.reduceSetForPush();
    594 
    595    RegisterIterator iter(mod);
    596    while (iter.more()) {
    597      startFloatTransferM(ls, rm, mode, WriteBack);
    598      int32_t reg = (*iter).code();
    599      do {
    600        offset += delta;
    601        if ((*iter).isDouble()) {
    602          offset += delta;
    603        }
    604        transferFloatReg(*iter);
    605      } while ((++iter).more() && int32_t((*iter).code()) == (reg += sign));
    606      finishFloatTransfer();
    607    }
    608    return offset;
    609  }
    610 };
    611 
    612 class MacroAssembler;
    613 
    614 class MacroAssemblerARMCompat : public MacroAssemblerARM {
    615 private:
    616  // Perform a downcast. Should be removed by Bug 996602.
    617  MacroAssembler& asMasm();
    618  const MacroAssembler& asMasm() const;
    619 
    620 public:
    621  MacroAssemblerARMCompat() {}
    622 
    623 public:
    624  // Jumps + other functions that should be called from non-arm specific
    625  // code. Basically, an x86 front end on top of the ARM code.
  // Conditional and unconditional branch to a label.
  void j(Condition code, Label* dest) { as_b(dest, code); }
  void j(Label* dest) { as_b(dest, Always); }

  // Register/immediate moves.
  void mov(Register src, Register dest) { ma_mov(src, dest); }
  void mov(ImmWord imm, Register dest) { ma_mov(Imm32(imm.value), dest); }
  void mov(ImmPtr imm, Register dest) {
    mov(ImmWord(uintptr_t(imm.value)), dest);
  }
  void mov(CodeLabel* label, Register dest);

  // Branch to JIT code: record a pending jump for relocation, then emit a
  // patchable absolute load of the target followed by an indirect branch.
  void branch(JitCode* c) {
    BufferOffset bo = m_buffer.nextOffset();
    addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
    ScratchRegisterScope scratch(asMasm());
    ma_movPatchable(ImmPtr(c->raw()), scratch, Always);
    ma_bx(scratch);
  }
  void branch(const Register reg) { ma_bx(reg); }
  void nop() { ma_nop(); }
  void shortJumpSizedNop() { ma_nop(); }
  // Return: pop the saved return address directly into pc.
  BufferOffset ret() { return ma_pop(pc); }
  // Return and additionally pop |n| bytes of stack.
  void retn(Imm32 n) {
    ScratchRegisterScope scratch(asMasm());
    SecondScratchRegisterScope scratch2(asMasm());
    ma_popn_pc(n, scratch, scratch2);
  }
  // Push an immediate by first materializing it in a scratch register.
  void push(Imm32 imm) {
    ScratchRegisterScope scratch(asMasm());
    ma_mov(imm, scratch);
    ma_push(scratch);
  }
  void push(ImmWord imm) { push(Imm32(imm.value)); }
  void push(ImmGCPtr imm) {
    ScratchRegisterScope scratch(asMasm());
    ma_mov(imm, scratch);
    ma_push(scratch);
  }
  // Push the word at |addr|.
  void push(const Address& addr) {
    ScratchRegisterScope scratch(asMasm());
    SecondScratchRegisterScope scratch2(asMasm());
    ma_ldr(addr, scratch, scratch2);
    ma_push(scratch);
  }
  void push(Register reg) {
    // Pushing sp is special-cased and goes through a scratch register;
    // see ma_push_sp for the details.
    if (reg == sp) {
      ScratchRegisterScope scratch(asMasm());
      ma_push_sp(reg, scratch);
    } else {
      ma_push(reg);
    }
  }
  void push(FloatRegister reg) {
    MOZ_ASSERT(reg.isFloat(), "simd128 not supported");
    ma_vpush(VFPRegister(reg));
  }
  // Push |reg| and reserve |extraSpace| additional bytes below it, using a
  // single pre-indexed store (sp is adjusted by extraSpace + 4).
  void pushWithPadding(Register reg, const Imm32 extraSpace) {
    ScratchRegisterScope scratch(asMasm());
    Imm32 totSpace = Imm32(extraSpace.value + 4);
    ma_dtr(IsStore, sp, totSpace, reg, scratch, PreIndex);
  }
  void pushWithPadding(Imm32 imm, const Imm32 extraSpace) {
    ScratchRegisterScope scratch(asMasm());
    SecondScratchRegisterScope scratch2(asMasm());
    Imm32 totSpace = Imm32(extraSpace.value + 4);
    ma_mov(imm, scratch);
    ma_dtr(IsStore, sp, totSpace, scratch, scratch2, PreIndex);
  }

  void pop(Register reg) { ma_pop(reg); }
  void pop(FloatRegister reg) {
    MOZ_ASSERT(reg.isFloat(), "simd128 not supported");
    ma_vpop(VFPRegister(reg));
  }

  // Pop into |reg| and discard |extraSpace| further bytes of stack, using a
  // single post-indexed load.
  void popN(Register reg, Imm32 extraSpace) {
    ScratchRegisterScope scratch(asMasm());
    Imm32 totSpace = Imm32(extraSpace.value + 4);
    ma_dtr(IsLoad, sp, totSpace, reg, scratch, PostIndex);
  }
    705 
  // Emit a jump that can later be toggled on/off in place.
  CodeOffset toggledJump(Label* label);

  // Emit a BLX or NOP instruction. ToggleCall can be used to patch this
  // instruction.
  CodeOffset toggledCall(JitCode* target, bool enabled);

  // Push a patchable immediate; returns the offset of the patchable move.
  CodeOffset pushWithPatch(ImmWord imm) {
    ScratchRegisterScope scratch(asMasm());
    CodeOffset label = movWithPatch(imm, scratch);
    ma_push(scratch);
    return label;
  }

  // Materialize |imm| with a patchable instruction sequence and return the
  // code offset where the sequence starts.
  CodeOffset movWithPatch(ImmWord imm, Register dest) {
    CodeOffset label = CodeOffset(currentOffset());
    ma_movPatchable(Imm32(imm.value), dest, Always);
    return label;
  }
  CodeOffset movWithPatch(ImmPtr imm, Register dest) {
    return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
  }

  // Unconditional jumps to labels, code objects, immediates, registers and
  // memory operands.
  void jump(Label* label) { as_b(label); }
  void jump(JitCode* code) { branch(code); }
  void jump(ImmPtr ptr) {
    ScratchRegisterScope scratch(asMasm());
    movePtr(ptr, scratch);
    ma_bx(scratch);
  }
  void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
  void jump(Register reg) { ma_bx(reg); }
  void jump(const Address& addr) {
    ScratchRegisterScope scratch(asMasm());
    SecondScratchRegisterScope scratch2(asMasm());
    ma_ldr(addr, scratch, scratch2);
    ma_bx(scratch);
  }

  // Negate |reg| in place, setting condition codes.
  void negl(Register reg) { ma_neg(reg, reg, SetCC); }
  // Bitwise AND tests (TST): set condition codes only, no result register.
  void test32(Register lhs, Register rhs) { ma_tst(lhs, rhs); }
  void test32(Register lhs, Imm32 imm) {
    ScratchRegisterScope scratch(asMasm());
    ma_tst(lhs, imm, scratch);
  }
  void test32(const Address& addr, Imm32 imm) {
    ScratchRegisterScope scratch(asMasm());
    SecondScratchRegisterScope scratch2(asMasm());
    ma_ldr(addr, scratch, scratch2);
    ma_tst(scratch, imm, scratch2);
  }
  void testPtr(Register lhs, Register rhs) { test32(lhs, rhs); }

  // On nunbox32 the tag already lives in the value's type register, so
  // there is nothing to extract; just assert the scope aliases it.
  void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
    MOZ_ASSERT(value.typeReg() == tag);
  }
    761 
  // Higher level tag testing code.  Each tester compares the value's tag
  // against the named type and returns the Condition to branch on.
  // NOTE(review): |cond| is presumably restricted to Equal/NotEqual —
  // confirm against the definitions in the .cpp.
  Condition testInt32(Condition cond, const ValueOperand& value);
  Condition testBoolean(Condition cond, const ValueOperand& value);
  Condition testDouble(Condition cond, const ValueOperand& value);
  Condition testNull(Condition cond, const ValueOperand& value);
  Condition testUndefined(Condition cond, const ValueOperand& value);
  Condition testString(Condition cond, const ValueOperand& value);
  Condition testSymbol(Condition cond, const ValueOperand& value);
  Condition testBigInt(Condition cond, const ValueOperand& value);
  Condition testObject(Condition cond, const ValueOperand& value);
  Condition testNumber(Condition cond, const ValueOperand& value);
  Condition testMagic(Condition cond, const ValueOperand& value);

  Condition testPrimitive(Condition cond, const ValueOperand& value);
  Condition testGCThing(Condition cond, const ValueOperand& value);

  // Register-based tests.
  Condition testInt32(Condition cond, Register tag);
  Condition testBoolean(Condition cond, Register tag);
  Condition testNull(Condition cond, Register tag);
  Condition testUndefined(Condition cond, Register tag);
  Condition testString(Condition cond, Register tag);
  Condition testSymbol(Condition cond, Register tag);
  Condition testBigInt(Condition cond, Register tag);
  Condition testObject(Condition cond, Register tag);
  Condition testDouble(Condition cond, Register tag);
  Condition testNumber(Condition cond, Register tag);
  Condition testMagic(Condition cond, Register tag);
  Condition testPrimitive(Condition cond, Register tag);
  Condition testGCThing(Condition cond, Register tag);

  // Memory-based tests (tag loaded from an Address).
  Condition testGCThing(Condition cond, const Address& address);
  Condition testMagic(Condition cond, const Address& address);
  Condition testInt32(Condition cond, const Address& address);
  Condition testDouble(Condition cond, const Address& address);
  Condition testBoolean(Condition cond, const Address& address);
  Condition testNull(Condition cond, const Address& address);
  Condition testUndefined(Condition cond, const Address& address);
  Condition testString(Condition cond, const Address& address);
  Condition testSymbol(Condition cond, const Address& address);
  Condition testBigInt(Condition cond, const Address& address);
  Condition testObject(Condition cond, const Address& address);
  Condition testNumber(Condition cond, const Address& address);

  // Memory-based tests (tag loaded from a BaseIndex).
  Condition testUndefined(Condition cond, const BaseIndex& src);
  Condition testNull(Condition cond, const BaseIndex& src);
  Condition testBoolean(Condition cond, const BaseIndex& src);
  Condition testString(Condition cond, const BaseIndex& src);
  Condition testSymbol(Condition cond, const BaseIndex& src);
  Condition testBigInt(Condition cond, const BaseIndex& src);
  Condition testInt32(Condition cond, const BaseIndex& src);
  Condition testObject(Condition cond, const BaseIndex& src);
  Condition testDouble(Condition cond, const BaseIndex& src);
  Condition testMagic(Condition cond, const BaseIndex& src);
  Condition testGCThing(Condition cond, const BaseIndex& src);
    817 
  // Unboxing code.
  void unboxNonDouble(const ValueOperand& operand, Register dest,
                      JSValueType type);
  void unboxNonDouble(const Address& src, Register dest, JSValueType type);
  void unboxNonDouble(const BaseIndex& src, Register dest, JSValueType type);
  // Typed convenience wrappers around unboxNonDouble.
  void unboxInt32(const ValueOperand& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
  }
  void unboxInt32(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
  }
  void unboxInt32(const BaseIndex& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
  }
  void unboxBoolean(const ValueOperand& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
  }
  void unboxBoolean(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
  }
  void unboxBoolean(const BaseIndex& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
  }
  void unboxString(const ValueOperand& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
  }
  void unboxString(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
  }
  void unboxSymbol(const ValueOperand& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
  }
  void unboxSymbol(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
  }
  void unboxBigInt(const ValueOperand& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
  }
  void unboxBigInt(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
  }
  void unboxObject(const ValueOperand& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(const Address& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
  }
  void unboxObject(const BaseIndex& src, Register dest) {
    unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
  }
  void unboxDouble(const ValueOperand& src, FloatRegister dest);
  void unboxDouble(const Address& src, FloatRegister dest);
  void unboxDouble(const BaseIndex& src, FloatRegister dest);

  void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType type);

  // See comment in MacroAssembler-x64.h.
  void unboxGCThingForGCBarrier(const Address& src, Register dest) {
    // On nunbox32 the payload word already is the GC-thing pointer.
    load32(ToPayload(src), dest);
  }
  void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
    if (src.payloadReg() != dest) {
      ma_mov(src.payloadReg(), dest);
    }
  }

  void unboxWasmAnyRefGCThingForGCBarrier(const Address& src, Register dest) {
    load32(ToPayload(src), dest);
    {
      // Keep only the GC-thing bits of the AnyRef (GCThingMask).
      ScratchRegisterScope scratch(asMasm());
      ma_and(Imm32(wasm::AnyRef::GCThingMask), dest, scratch);
    }
  }

  // dest <- src masked down to its containing GC chunk.
  void getWasmAnyRefGCThingChunk(Register src, Register dest) {
    ScratchRegisterScope scratch(asMasm());
    ma_and(Imm32(wasm::AnyRef::GCThingChunkMask), src, dest, scratch);
  }

  // Logically negate a boolean Value in place: payload ^= 1.
  void notBoolean(const ValueOperand& val) {
    as_eor(val.payloadReg(), val.payloadReg(), Imm8(1));
  }

  template <typename T>
  void fallibleUnboxPtrImpl(const T& src, Register dest, JSValueType type,
                            Label* fail);
    904 
  // Boxing code.
  void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
  void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
  void boxNonDouble(Register type, Register src, const ValueOperand& dest);

  // Extended unboxing API. If the payload is already in a register, returns
  // that register. Otherwise, provides a move to the given scratch register,
  // and returns that.
  [[nodiscard]] Register extractObject(const Address& address,
                                       Register scratch);
  [[nodiscard]] Register extractObject(const ValueOperand& value,
                                       Register scratch) {
    // Unbox in place; the payload register then holds the object pointer.
    unboxNonDouble(value, value.payloadReg(), JSVAL_TYPE_OBJECT);
    return value.payloadReg();
  }
  [[nodiscard]] Register extractSymbol(const ValueOperand& value,
                                       Register scratch) {
    unboxNonDouble(value, value.payloadReg(), JSVAL_TYPE_SYMBOL);
    return value.payloadReg();
  }
  [[nodiscard]] Register extractInt32(const ValueOperand& value,
                                      Register scratch) {
    // On nunbox32 the payload register already holds the int32.
    return value.payloadReg();
  }
  [[nodiscard]] Register extractBoolean(const ValueOperand& value,
                                        Register scratch) {
    // On nunbox32 the payload register already holds the boolean.
    return value.payloadReg();
  }
  [[nodiscard]] Register extractTag(const Address& address, Register scratch);
  [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
  [[nodiscard]] Register extractTag(const ValueOperand& value,
                                    Register scratch) {
    // The tag lives in the type register; no extraction needed.
    return value.typeReg();
  }

  void loadInt32OrDouble(const Address& src, FloatRegister dest);
  void loadInt32OrDouble(Register base, Register index, FloatRegister dest,
                         int32_t shift = defaultShift);
  void loadConstantDouble(double dp, FloatRegister dest);

  // Treat the value as a boolean, and set condition codes accordingly.
  Condition testInt32Truthy(bool truthy, const ValueOperand& operand);
  Condition testBooleanTruthy(bool truthy, const ValueOperand& operand);
  Condition testDoubleTruthy(bool truthy, FloatRegister reg);
  Condition testStringTruthy(bool truthy, const ValueOperand& value);
  Condition testBigIntTruthy(bool truthy, const ValueOperand& value);

  void loadConstantFloat32(float f, FloatRegister dest);

  // Load an unboxed payload into either a float or a GPR destination.
  void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
    if (dest.isFloat()) {
      loadInt32OrDouble(address, dest.fpu());
    } else {
      ScratchRegisterScope scratch(asMasm());
      ma_ldr(address, dest.gpr(), scratch);
    }
  }

  void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
    if (dest.isFloat()) {
      loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
    } else {
      load32(address, dest.gpr());
    }
  }
    970 
  void storeValue(ValueOperand val, const Address& dst);
  void storeValue(ValueOperand val, const BaseIndex& dest);
  // Store a known-typed payload register as a Value at |dest|.
  void storeValue(JSValueType type, Register reg, BaseIndex dest) {
    ScratchRegisterScope scratch(asMasm());
    SecondScratchRegisterScope scratch2(asMasm());

    int32_t payloadoffset = dest.offset + NUNBOX32_PAYLOAD_OFFSET;
    int32_t typeoffset = dest.offset + NUNBOX32_TYPE_OFFSET;

    // scratch <- base + (index << scale); offsets are applied below.
    ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);

    // Store the payload.
    if (payloadoffset < 4096 && payloadoffset > -4096) {
      // Offset fits in the 12-bit immediate of a single str.
      ma_str(reg, DTRAddr(scratch, DtrOffImm(payloadoffset)));
    } else {
      ma_str(reg, Address(scratch, payloadoffset), scratch2);
    }

    // Store the type.
    if (typeoffset < 4096 && typeoffset > -4096) {
      // Encodable as DTRAddr, so only two instructions needed.
      ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch2);
      ma_str(scratch2, DTRAddr(scratch, DtrOffImm(typeoffset)));
    } else {
      // Since there are only two scratch registers, the offset must be
      // applied early using a third instruction to be safe.
      ma_add(Imm32(typeoffset), scratch, scratch2);
      ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch2);
      ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
    }
  }
  // Store a known-typed payload register as a Value at an Address.
  void storeValue(JSValueType type, Register reg, Address dest) {
    ScratchRegisterScope scratch(asMasm());
    SecondScratchRegisterScope scratch2(asMasm());

    // Payload first, then the tag word.
    ma_str(reg, dest, scratch2);
    ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch);
    ma_str(scratch, Address(dest.base, dest.offset + NUNBOX32_TYPE_OFFSET),
           scratch2);
  }
  // Store a constant Value at an Address: tag word, then payload word
  // (GC-thing payloads use ImmGCPtr so they are traced by relocation).
  void storeValue(const Value& val, const Address& dest) {
    ScratchRegisterScope scratch(asMasm());
    SecondScratchRegisterScope scratch2(asMasm());

    ma_mov(Imm32(val.toNunboxTag()), scratch);
    ma_str(scratch, ToType(dest), scratch2);
    if (val.isGCThing()) {
      ma_mov(ImmGCPtr(val.toGCThing()), scratch);
    } else {
      ma_mov(Imm32(val.toNunboxPayload()), scratch);
    }
    ma_str(scratch, ToPayload(dest), scratch2);
  }
  // Store a constant Value at a BaseIndex destination.
  void storeValue(const Value& val, BaseIndex dest) {
    ScratchRegisterScope scratch(asMasm());
    SecondScratchRegisterScope scratch2(asMasm());

    int32_t typeoffset = dest.offset + NUNBOX32_TYPE_OFFSET;
    int32_t payloadoffset = dest.offset + NUNBOX32_PAYLOAD_OFFSET;

    // scratch <- base + (index << scale).
    ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);

    // Store the type.
    if (typeoffset < 4096 && typeoffset > -4096) {
      ma_mov(Imm32(val.toNunboxTag()), scratch2);
      ma_str(scratch2, DTRAddr(scratch, DtrOffImm(typeoffset)));
    } else {
      // Offset not encodable: fold it into scratch first (clobbering the
      // computed base), then rebuild the base afterwards.
      ma_add(Imm32(typeoffset), scratch, scratch2);
      ma_mov(Imm32(val.toNunboxTag()), scratch2);
      ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
      // Restore scratch for the payload store.
      ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
    }

    // Store the payload, marking if necessary.
    if (payloadoffset < 4096 && payloadoffset > -4096) {
      if (val.isGCThing()) {
        ma_mov(ImmGCPtr(val.toGCThing()), scratch2);
      } else {
        ma_mov(Imm32(val.toNunboxPayload()), scratch2);
      }
      ma_str(scratch2, DTRAddr(scratch, DtrOffImm(payloadoffset)));
    } else {
      ma_add(Imm32(payloadoffset), scratch, scratch2);
      if (val.isGCThing()) {
        ma_mov(ImmGCPtr(val.toGCThing()), scratch2);
      } else {
        ma_mov(Imm32(val.toNunboxPayload()), scratch2);
      }
      ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
    }
  }
  // Copy a Value from |src| to |dest| word-by-word through |temp|.
  void storeValue(const Address& src, const Address& dest, Register temp) {
    load32(ToType(src), temp);
    store32(temp, ToType(dest));

    load32(ToPayload(src), temp);
    store32(temp, ToPayload(dest));
  }

  // Store a PrivateValue: zeroed tag word plus a pointer-sized payload.
  void storePrivateValue(Register src, const Address& dest) {
    store32(Imm32(0), ToType(dest));
    store32(src, ToPayload(dest));
  }
  void storePrivateValue(ImmGCPtr imm, const Address& dest) {
    store32(Imm32(0), ToType(dest));
    storePtr(imm, ToPayload(dest));
  }

  void loadValue(Address src, ValueOperand val);
  void loadValue(Operand dest, ValueOperand val) {
    loadValue(dest.toAddress(), val);
  }
  void loadValue(const BaseIndex& addr, ValueOperand val);

  // Like loadValue but guaranteed to not use LDRD or LDM instructions (these
  // don't support unaligned accesses).
  void loadUnalignedValue(const Address& src, ValueOperand dest);

  void tagValue(JSValueType type, Register payload, ValueOperand dest) {
    boxNonDouble(type, payload, dest);
  }

  void pushValue(ValueOperand val);
  void popValue(ValueOperand val);
  // Push the tag word first, then the payload, so the Value ends up in
  // nunbox32 layout on the (downward-growing) stack.
  void pushValue(const Value& val) {
    push(Imm32(val.toNunboxTag()));
    if (val.isGCThing()) {
      push(ImmGCPtr(val.toGCThing()));
    } else {
      push(Imm32(val.toNunboxPayload()));
    }
  }
  void pushValue(JSValueType type, Register reg) {
    push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
    ma_push(reg);
  }
  void pushValue(const Address& addr);
  void pushValue(const BaseIndex& addr, Register scratch);

  // Store only the payload (or only the tag) half of a Value.
  void storePayload(const Value& val, const Address& dest);
  void storePayload(Register src, const Address& dest);
  void storePayload(const Value& val, const BaseIndex& dest);
  void storePayload(Register src, const BaseIndex& dest);
  void storeTypeTag(ImmTag tag, const Address& dest);
  void storeTypeTag(ImmTag tag, const BaseIndex& dest);

  void handleFailureWithHandlerTail(Label* profilerExitTail, Label* bailoutTail,
                                    uint32_t* returnValueCheckOffset);
   1120 
   1121  /////////////////////////////////////////////////////////////////
   1122  // Common interface.
   1123  /////////////////////////////////////////////////////////////////
   1124 public:
   1125  void not32(Register reg);
   1126 
   1127  void move32(Imm32 imm, Register dest);
   1128  void move32(Register src, Register dest);
   1129 
   1130  void movePtr(Register src, Register dest);
   1131  void movePtr(ImmWord imm, Register dest);
   1132  void movePtr(ImmPtr imm, Register dest);
   1133  void movePtr(wasm::SymbolicAddress imm, Register dest);
   1134  void movePtr(ImmGCPtr imm, Register dest);
   1135 
   1136  FaultingCodeOffset load8SignExtend(const Address& address, Register dest);
   1137  FaultingCodeOffset load8SignExtend(const BaseIndex& src, Register dest);
   1138 
   1139  FaultingCodeOffset load8ZeroExtend(const Address& address, Register dest);
   1140  FaultingCodeOffset load8ZeroExtend(const BaseIndex& src, Register dest);
   1141 
   1142  FaultingCodeOffset load16SignExtend(const Address& address, Register dest);
   1143  FaultingCodeOffset load16SignExtend(const BaseIndex& src, Register dest);
   1144 
   1145  template <typename S>
   1146  void load16UnalignedSignExtend(const S& src, Register dest) {
   1147    // load16SignExtend uses |ldrsh|, which supports unaligned access.
   1148    load16SignExtend(src, dest);
   1149  }
   1150 
   1151  FaultingCodeOffset load16ZeroExtend(const Address& address, Register dest);
   1152  FaultingCodeOffset load16ZeroExtend(const BaseIndex& src, Register dest);
   1153 
   1154  template <typename S>
   1155  void load16UnalignedZeroExtend(const S& src, Register dest) {
   1156    // load16ZeroExtend uses |ldrh|, which supports unaligned access.
   1157    load16ZeroExtend(src, dest);
   1158  }
   1159 
   1160  FaultingCodeOffset load32(const Address& address, Register dest);
   1161  FaultingCodeOffset load32(const BaseIndex& address, Register dest);
   1162  void load32(AbsoluteAddress address, Register dest);
   1163 
   1164  template <typename S>
   1165  void load32Unaligned(const S& src, Register dest) {
   1166    // load32 uses |ldr|, which supports unaligned access.
   1167    load32(src, dest);
   1168  }
   1169 
   1170  FaultingCodeOffsetPair load64(const Address& address, Register64 dest) {
   1171    FaultingCodeOffset fco1, fco2;
   1172    bool highBeforeLow = address.base == dest.low;
   1173    if (highBeforeLow) {
   1174      fco1 = load32(HighWord(address), dest.high);
   1175      fco2 = load32(LowWord(address), dest.low);
   1176    } else {
   1177      fco1 = load32(LowWord(address), dest.low);
   1178      fco2 = load32(HighWord(address), dest.high);
   1179    }
   1180    return FaultingCodeOffsetPair(fco1, fco2);
   1181  }
   1182  FaultingCodeOffsetPair load64(const BaseIndex& address, Register64 dest) {
   1183    // If you run into this, relax your register allocation constraints.
   1184    MOZ_RELEASE_ASSERT(
   1185        !((address.base == dest.low || address.base == dest.high) &&
   1186          (address.index == dest.low || address.index == dest.high)));
   1187    FaultingCodeOffset fco1, fco2;
   1188    bool highBeforeLow = address.base == dest.low || address.index == dest.low;
   1189    if (highBeforeLow) {
   1190      fco1 = load32(HighWord(address), dest.high);
   1191      fco2 = load32(LowWord(address), dest.low);
   1192    } else {
   1193      fco1 = load32(LowWord(address), dest.low);
   1194      fco2 = load32(HighWord(address), dest.high);
   1195    }
   1196    return FaultingCodeOffsetPair(fco1, fco2);
   1197  }
   1198 
   1199  template <typename S>
   1200  void load64Unaligned(const S& src, Register64 dest) {
   1201    // load64 calls load32, which supports unaligned accesses.
   1202    load64(src, dest);
   1203  }
   1204 
   1205  FaultingCodeOffset loadPtr(const Address& address, Register dest);
   1206  FaultingCodeOffset loadPtr(const BaseIndex& src, Register dest);
   1207  void loadPtr(AbsoluteAddress address, Register dest);
   1208  void loadPtr(wasm::SymbolicAddress address, Register dest);
   1209 
   1210  void loadPrivate(const Address& address, Register dest);
   1211 
   1212  FaultingCodeOffset loadDouble(const Address& addr, FloatRegister dest);
   1213  FaultingCodeOffset loadDouble(const BaseIndex& src, FloatRegister dest);
   1214 
   1215  FaultingCodeOffset loadFloat32(const Address& addr, FloatRegister dest);
   1216  FaultingCodeOffset loadFloat32(const BaseIndex& src, FloatRegister dest);
   1217 
   1218  FaultingCodeOffset loadFloat16(const Address& addr, FloatRegister dest,
   1219                                 Register scratch);
   1220  FaultingCodeOffset loadFloat16(const BaseIndex& src, FloatRegister dest,
   1221                                 Register scratch);
   1222 
   1223  FaultingCodeOffset store8(Register src, const Address& address);
   1224  void store8(Imm32 imm, const Address& address);
   1225  FaultingCodeOffset store8(Register src, const BaseIndex& address);
   1226  void store8(Imm32 imm, const BaseIndex& address);
   1227 
   1228  FaultingCodeOffset store16(Register src, const Address& address);
   1229  void store16(Imm32 imm, const Address& address);
   1230  FaultingCodeOffset store16(Register src, const BaseIndex& address);
   1231  void store16(Imm32 imm, const BaseIndex& address);
   1232 
   1233  template <typename S, typename T>
   1234  void store16Unaligned(const S& src, const T& dest) {
   1235    // store16 uses |strh|, which supports unaligned access.
   1236    store16(src, dest);
   1237  }
   1238 
   1239  void store32(Register src, AbsoluteAddress address);
   1240  FaultingCodeOffset store32(Register src, const Address& address);
   1241  FaultingCodeOffset store32(Register src, const BaseIndex& address);
   1242  void store32(Imm32 src, const Address& address);
   1243  void store32(Imm32 src, const BaseIndex& address);
   1244 
   1245  template <typename S, typename T>
   1246  void store32Unaligned(const S& src, const T& dest) {
   1247    // store32 uses |str|, which supports unaligned access.
   1248    store32(src, dest);
   1249  }
   1250 
   1251  FaultingCodeOffsetPair store64(Register64 src, Address address) {
   1252    FaultingCodeOffset fco1 = store32(src.low, LowWord(address));
   1253    FaultingCodeOffset fco2 = store32(src.high, HighWord(address));
   1254    return FaultingCodeOffsetPair(fco1, fco2);
   1255  }
   1256 
   1257  FaultingCodeOffsetPair store64(Register64 src, const BaseIndex& address) {
   1258    FaultingCodeOffset fco1 = store32(src.low, LowWord(address));
   1259    FaultingCodeOffset fco2 = store32(src.high, HighWord(address));
   1260    return FaultingCodeOffsetPair(fco1, fco2);
   1261  }
   1262 
   1263  void store64(Imm64 imm, Address address) {
   1264    store32(imm.low(), LowWord(address));
   1265    store32(imm.hi(), HighWord(address));
   1266  }
   1267 
   1268  void store64(Imm64 imm, const BaseIndex& address) {
   1269    store32(imm.low(), LowWord(address));
   1270    store32(imm.hi(), HighWord(address));
   1271  }
   1272 
   1273  template <typename S, typename T>
   1274  void store64Unaligned(const S& src, const T& dest) {
   1275    // store64 calls store32, which supports unaligned access.
   1276    store64(src, dest);
   1277  }
   1278 
   1279  void storePtr(ImmWord imm, const Address& address);
   1280  void storePtr(ImmWord imm, const BaseIndex& address);
   1281  void storePtr(ImmPtr imm, const Address& address);
   1282  void storePtr(ImmPtr imm, const BaseIndex& address);
   1283  void storePtr(ImmGCPtr imm, const Address& address);
   1284  void storePtr(ImmGCPtr imm, const BaseIndex& address);
   1285  FaultingCodeOffset storePtr(Register src, const Address& address);
   1286  FaultingCodeOffset storePtr(Register src, const BaseIndex& address);
   1287  void storePtr(Register src, AbsoluteAddress dest);
   1288 
   1289  void moveDouble(FloatRegister src, FloatRegister dest,
   1290                  Condition cc = Always) {
   1291    ma_vmov(src, dest, cc);
   1292  }
   1293 
   1294  inline void incrementInt32Value(const Address& addr);
   1295 
   1296  void cmp32(Register lhs, Imm32 rhs);
   1297  void cmp32(Register lhs, Register rhs);
   1298  void cmp32(const Address& lhs, Imm32 rhs);
   1299  void cmp32(const Address& lhs, Register rhs);
   1300 
   1301  void cmpPtr(Register lhs, Register rhs);
   1302  void cmpPtr(Register lhs, ImmWord rhs);
   1303  void cmpPtr(Register lhs, ImmPtr rhs);
   1304  void cmpPtr(Register lhs, ImmGCPtr rhs);
   1305  void cmpPtr(Register lhs, Imm32 rhs);
   1306  void cmpPtr(const Address& lhs, Register rhs);
   1307  void cmpPtr(const Address& lhs, ImmWord rhs);
   1308  void cmpPtr(const Address& lhs, ImmPtr rhs);
   1309  void cmpPtr(const Address& lhs, ImmGCPtr rhs);
   1310  void cmpPtr(const Address& lhs, Imm32 rhs);
   1311 
   1312  template <typename T1, typename T2>
   1313  inline void cmp64SetAliased(Condition cond, T1 lhs, T2 rhs, Register dest);
   1314 
   1315  template <typename T1, typename T2>
   1316  inline void cmp64SetNonAliased(Condition cond, T1 lhs, T2 rhs, Register dest);
   1317 
   1318  template <typename T1, typename T2>
   1319  inline void branch64Impl(Condition cond, T1 lhs, T2 rhs, Label* success,
   1320                           Label* fail);
   1321 
   1322  void setStackArg(Register reg, uint32_t arg);
   1323 
 // Unconditional breakpoint instruction.
 void breakpoint();
 // Conditional breakpoint: only traps when cc holds.
 void breakpoint(Condition cc);

 // Trigger the simulator's interactive read-eval-print loop.
 // The message will be printed at the stopping point.
 // (On non-simulator builds, does nothing.)
 void simulatorStop(const char* msg);

 // Select the minimum or maximum (per isMax) of two 32-bit values into
 // dest.
 void minMax32(Register lhs, Register rhs, Register dest, bool isMax);
 void minMax32(Register lhs, Imm32 rhs, Register dest, bool isMax);
   1335 
 // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
 // Checks for NaN if canBeNaN is true.
 void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool canBeNaN,
                   bool isMax);
 void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool canBeNaN,
                    bool isMax);

 // Compare two doubles, setting the condition flags for a subsequent
 // conditional instruction or branch.
 void compareDouble(FloatRegister lhs, FloatRegister rhs);

 // Single-precision counterpart of compareDouble.
 void compareFloat(FloatRegister lhs, FloatRegister rhs);

 // Verifies that the stack pointer satisfies the required alignment
 // (debug-only sanity check — see the definition for details).
 void checkStackAlignment();
   1348 
 // Materialize a comparison result as 0/1 in dest: unconditionally zero
 // dest, then conditionally overwrite with 1. The caller must have already
 // set the condition flags; the order matters since the first mov must not
 // be predicated.
 void emitSet(Assembler::Condition cond, Register dest) {
   ma_mov(Imm32(0), dest);
   ma_mov(Imm32(1), dest, cond);
 }
   1353 
 // dest = (value is null) per cond (Equal/NotEqual), as a 0/1 boolean.
 void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
   cond = testNull(cond, value);
   emitSet(cond, dest);
 }
   1358 
 // dest = (value is an object) per cond (Equal/NotEqual), as a 0/1 boolean.
 void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
   cond = testObject(cond, value);
   emitSet(cond, dest);
 }
   1363 
 // dest = (value is undefined) per cond (Equal/NotEqual), as a 0/1 boolean.
 void testUndefinedSet(Condition cond, const ValueOperand& value,
                       Register dest) {
   cond = testUndefined(cond, value);
   emitSet(cond, dest);
 }
   1369 
   1370 protected:
 // Builds a fake exit frame for out-of-line paths using fakeReturnAddr as
 // the recorded return address.
 bool buildOOLFakeExitFrame(void* fakeReturnAddr);
   1372 
   1373 public:
   1374  void computeEffectiveAddress(const Address& address, Register dest) {
   1375    ScratchRegisterScope scratch(asMasm());
   1376    ma_add(address.base, Imm32(address.offset), dest, scratch, LeaveCC);
   1377  }
   1378  void computeEffectiveAddress(const BaseIndex& address, Register dest) {
   1379    ScratchRegisterScope scratch(asMasm());
   1380    ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd,
   1381           LeaveCC);
   1382    if (address.offset) {
   1383      ma_add(dest, Imm32(address.offset), dest, scratch, LeaveCC);
   1384    }
   1385  }
 // Float-to-int rounding helpers. Each converts input to an int32 in
 // output, branching to handleNotAnInt when the value cannot be
 // represented exactly as an int32 (NaN, out of range, etc. — see the
 // definitions). The round variants need an extra FP temporary.
 void floor(FloatRegister input, Register output, Label* handleNotAnInt);
 void floorf(FloatRegister input, Register output, Label* handleNotAnInt);
 void ceil(FloatRegister input, Register output, Label* handleNotAnInt);
 void ceilf(FloatRegister input, Register output, Label* handleNotAnInt);
 void round(FloatRegister input, Register output, Label* handleNotAnInt,
            FloatRegister tmp);
 void roundf(FloatRegister input, Register output, Label* handleNotAnInt,
             FloatRegister tmp);
 void trunc(FloatRegister input, Register output, Label* handleNotAnInt);
 void truncf(FloatRegister input, Register output, Label* handleNotAnInt);
   1396 
   1397  void lea(Operand addr, Register dest) {
   1398    ScratchRegisterScope scratch(asMasm());
   1399    ma_add(addr.baseReg(), Imm32(addr.disp()), dest, scratch);
   1400  }
   1401 
 // ABI return: branch to the address in the link register (bx lr).
 void abiret() { as_bx(lr); }
   1403 
 // Copy one single-precision FP register to another, optionally predicated
 // on a condition code. Uses the single-precision overlay of the VFP
 // registers.
 void moveFloat32(FloatRegister src, FloatRegister dest,
                  Condition cc = Always) {
   as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(),
           cc);
 }
   1409 
 // Instrumentation for entering and leaving the profiler: record the new
 // frame pointer on entry, restore the previous state on exit.
 void profilerEnterFrame(Register framePtr, Register scratch);
 void profilerExitFrame();
   1413 };
   1414 
   1415 using MacroAssemblerSpecific = MacroAssemblerARMCompat;
   1416 
   1417 }  // namespace jit
   1418 }  // namespace js
   1419 
   1420 #endif /* jit_arm_MacroAssembler_arm_h */