tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Assembler-arm.h (79616B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef jit_arm_Assembler_arm_h
      8 #define jit_arm_Assembler_arm_h
      9 
     10 #include "mozilla/Attributes.h"
     11 #include "mozilla/MathAlgorithms.h"
     12 
     13 #include <algorithm>
     14 #include <type_traits>
     15 
     16 #include "jit/arm/Architecture-arm.h"
     17 #include "jit/arm/disasm/Disasm-arm.h"
     18 #include "jit/CompactBuffer.h"
     19 #include "jit/JitCode.h"
     20 #include "jit/shared/Assembler-shared.h"
     21 #include "jit/shared/Disassembler-shared.h"
     22 #include "jit/shared/IonAssemblerBufferWithConstantPools.h"
     23 #include "wasm/WasmTypeDecls.h"
     24 
     25 union PoolHintPun;
     26 
     27 namespace js {
     28 namespace jit {
     29 
     30 using LiteralDoc = DisassemblerSpew::LiteralDoc;
     31 using LabelDoc = DisassemblerSpew::LabelDoc;
     32 
     33 // NOTE: there are duplicates in this list! Sometimes we want to specifically
     34 // refer to the link register as a link register (bl lr is much clearer than bl
     35 // r14). HOWEVER, this register can easily be a gpr when it is not busy holding
     36 // the return address.
     37 static constexpr Register r0{Registers::r0};
     38 static constexpr Register r1{Registers::r1};
     39 static constexpr Register r2{Registers::r2};
     40 static constexpr Register r3{Registers::r3};
     41 static constexpr Register r4{Registers::r4};
     42 static constexpr Register r5{Registers::r5};
     43 static constexpr Register r6{Registers::r6};
     44 static constexpr Register r7{Registers::r7};
     45 static constexpr Register r8{Registers::r8};
     46 static constexpr Register r9{Registers::r9};
     47 static constexpr Register r10{Registers::r10};
     48 static constexpr Register r11{Registers::r11};
     49 static constexpr Register r12{Registers::ip};
     50 static constexpr Register ip{Registers::ip};
     51 static constexpr Register sp{Registers::sp};
     52 static constexpr Register r14{Registers::lr};
     53 static constexpr Register lr{Registers::lr};
     54 static constexpr Register pc{Registers::pc};
     55 
     56 static constexpr Register ScratchRegister{Registers::ip};
     57 
// Helper class for ScratchRegister usage. Asserts that only one piece
// of code thinks it has exclusive ownership of the scratch register.
struct ScratchRegisterScope : public AutoRegisterScope {
  // Claims ScratchRegister (ip/r12, per the definitions above) for the
  // dynamic extent of this scope.
  explicit ScratchRegisterScope(MacroAssembler& masm)
      : AutoRegisterScope(masm, ScratchRegister) {}
};
     64 
// Scoped claim of the second scratch register. The register chosen is
// determined at runtime (see AutoNonDefaultSecondScratchRegister below),
// so the constructor is defined out of line.
struct SecondScratchRegisterScope : public AutoRegisterScope {
  explicit SecondScratchRegisterScope(MacroAssembler& masm);
};
     68 
// RAII helper that temporarily overrides which register the assembler uses
// as its second scratch register, restoring the previous choice when the
// scope ends.
class MOZ_RAII AutoNonDefaultSecondScratchRegister {
 public:
  explicit AutoNonDefaultSecondScratchRegister(MacroAssembler& masm,
                                               Register reg);
  ~AutoNonDefaultSecondScratchRegister();

 private:
  Register prevSecondScratch_;  // Saved register, restored in the dtor.
  MacroAssembler& masm_;
};
     79 
     80 static constexpr Register OsrFrameReg = r3;
     81 static constexpr Register CallTempReg0 = r5;
     82 static constexpr Register CallTempReg1 = r6;
     83 static constexpr Register CallTempReg2 = r7;
     84 static constexpr Register CallTempReg3 = r8;
     85 static constexpr Register CallTempReg4 = r0;
     86 static constexpr Register CallTempReg5 = r1;
     87 
     88 static constexpr Register IntArgReg0 = r0;
     89 static constexpr Register IntArgReg1 = r1;
     90 static constexpr Register IntArgReg2 = r2;
     91 static constexpr Register IntArgReg3 = r3;
     92 static constexpr Register CallTempNonArgRegs[] = {r5, r6, r7, r8};
     93 static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
     94 
     95 // These register assignments for the 64-bit atomic ops are frequently too
     96 // constraining, but we have no way of expressing looser constraints to the
     97 // register allocator.
     98 
     99 // CompareExchange: Any two odd/even pairs would do for `new` and `out`, and any
    100 // pair would do for `old`, so long as none of them overlap.
    101 
    102 static constexpr Register CmpXchgOldLo = r4;
    103 static constexpr Register CmpXchgOldHi = r5;
    104 static constexpr Register64 CmpXchgOld64 =
    105    Register64(CmpXchgOldHi, CmpXchgOldLo);
    106 static constexpr Register CmpXchgNewLo = IntArgReg2;
    107 static constexpr Register CmpXchgNewHi = IntArgReg3;
    108 static constexpr Register64 CmpXchgNew64 =
    109    Register64(CmpXchgNewHi, CmpXchgNewLo);
    110 static constexpr Register CmpXchgOutLo = IntArgReg0;
    111 static constexpr Register CmpXchgOutHi = IntArgReg1;
    112 static constexpr Register64 CmpXchgOut64 =
    113    Register64(CmpXchgOutHi, CmpXchgOutLo);
    114 
    115 // Exchange: Any two non-equal odd/even pairs would do for `new` and `out`.
    116 
    117 static constexpr Register XchgNewLo = IntArgReg2;
    118 static constexpr Register XchgNewHi = IntArgReg3;
    119 static constexpr Register64 XchgNew64 = Register64(XchgNewHi, XchgNewLo);
    120 static constexpr Register XchgOutLo = IntArgReg0;
    121 static constexpr Register XchgOutHi = IntArgReg1;
    122 
    123 // Atomic rmw operations: Any two odd/even pairs would do for `tmp` and `out`,
    124 // and any pair would do for `val`, so long as none of them overlap.
    125 
    126 static constexpr Register FetchOpValLo = r4;
    127 static constexpr Register FetchOpValHi = r5;
    128 static constexpr Register64 FetchOpVal64 =
    129    Register64(FetchOpValHi, FetchOpValLo);
    130 static constexpr Register FetchOpTmpLo = IntArgReg2;
    131 static constexpr Register FetchOpTmpHi = IntArgReg3;
    132 static constexpr Register64 FetchOpTmp64 =
    133    Register64(FetchOpTmpHi, FetchOpTmpLo);
    134 static constexpr Register FetchOpOutLo = IntArgReg0;
    135 static constexpr Register FetchOpOutHi = IntArgReg1;
    136 static constexpr Register64 FetchOpOut64 =
    137    Register64(FetchOpOutHi, FetchOpOutLo);
    138 
// Assigns an ABI location (register or stack) to each successive call
// argument, following either the hard-float or soft-float ARM convention.
class ABIArgGenerator : public ABIArgGeneratorShared {
  unsigned intRegIndex_;    // Next general-purpose argument register to use.
  unsigned floatRegIndex_;  // Next float argument register to use (hard-fp).
  ABIArg current_;          // Location produced by the most recent next().

  // ARM can either use HardFp (use float registers for float arguments), or
  // SoftFp (use general registers for float arguments) ABI.  We keep this
  // switch as a runtime switch because wasm always uses the HardFp back-end
  // while the calls to native functions have to use the one provided by the
  // system.
  bool useHardFp_;

  ABIArg softNext(MIRType argType);
  ABIArg hardNext(MIRType argType);

 public:
  explicit ABIArgGenerator(ABIKind kind);

  // May only be called before any argument has been assigned (both register
  // indices must still be zero).
  void setUseHardFp(bool useHardFp) {
    MOZ_ASSERT(intRegIndex_ == 0 && floatRegIndex_ == 0);
    MOZ_ASSERT_IF(kind_ == ABIKind::Wasm, useHardFp);
    useHardFp_ = useHardFp;
  }
  ABIArg next(MIRType argType);
  ABIArg& current() { return current_; }
};
    165 
    166 bool IsUnaligned(const wasm::MemoryAccessDesc& access);
    167 
    168 // See "ABI special registers" in Assembler-shared.h for more information.
    169 static constexpr Register ABINonArgReg0 = r4;
    170 static constexpr Register ABINonArgReg1 = r5;
    171 static constexpr Register ABINonArgReg2 = r6;
    172 static constexpr Register ABINonArgReg3 = r7;
    173 
    174 // See "ABI special registers" in Assembler-shared.h for more information.
    175 // Avoid d15 which is the ScratchDoubleReg_.
    176 static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::d8,
    177                                                  VFPRegister::Double};
    178 
    179 // See "ABI special registers" in Assembler-shared.h for more information.
    180 static constexpr Register ABINonArgReturnReg0 = r4;
    181 static constexpr Register ABINonArgReturnReg1 = r5;
    182 static constexpr Register ABINonVolatileReg = r6;
    183 
    184 // See "ABI special registers" in Assembler-shared.h for more information.
    185 static constexpr Register ABINonArgReturnVolatileReg = lr;
    186 
    187 // See "ABI special registers" in Assembler-shared.h, and "The WASM ABIs" in
    188 // WasmFrame.h for more information.
    189 static constexpr Register InstanceReg = r9;
    190 static constexpr Register HeapReg = r10;
    191 
    192 // Registers used for wasm table calls. These registers must be disjoint
    193 // from the ABI argument registers, InstanceReg and each other.
    194 static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
    195 static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
    196 static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
    197 static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
    198 
    199 // Registers used for ref calls.
    200 static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
    201 static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
    202 static constexpr Register WasmCallRefCallScratchReg2 = ABINonArgReg2;
    203 static constexpr Register WasmCallRefReg = ABINonArgReg3;
    204 
    205 // Registers used for wasm tail calls operations.
    206 static constexpr Register WasmTailCallInstanceScratchReg = ABINonArgReg1;
    207 static constexpr Register WasmTailCallRAScratchReg = lr;
    208 static constexpr Register WasmTailCallFPScratchReg = ABINonArgReg3;
    209 
    210 // Register used as a scratch along the return path in the fast js -> wasm stub
    211 // code.  This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
    212 // It must be a volatile register.
    213 static constexpr Register WasmJitEntryReturnScratch = r5;
    214 
    215 static constexpr Register PreBarrierReg = r1;
    216 
    217 static constexpr Register InterpreterPCReg = r9;
    218 
    219 static constexpr Register InvalidReg{Registers::invalid_reg};
    220 static constexpr FloatRegister InvalidFloatReg;
    221 
    222 static constexpr Register JSReturnReg_Type = r3;
    223 static constexpr Register JSReturnReg_Data = r2;
    224 static constexpr Register StackPointer = sp;
    225 static constexpr Register FramePointer = r11;
    226 static constexpr Register ReturnReg = r0;
    227 static constexpr Register64 ReturnReg64(r1, r0);
    228 
    229 // The attribute '__value_in_regs' alters the calling convention of a function
    230 // so that a structure of up to four elements can be returned via the argument
    231 // registers rather than being written to memory.
    232 static constexpr Register ReturnRegVal0 = IntArgReg0;
    233 static constexpr Register ReturnRegVal1 = IntArgReg1;
    234 static constexpr Register ReturnRegVal2 = IntArgReg2;
    235 static constexpr Register ReturnRegVal3 = IntArgReg3;
    236 
    237 static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::d0,
    238                                                   VFPRegister::Single};
    239 static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::d0,
    240                                                  VFPRegister::Double};
    241 static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
    242 static constexpr FloatRegister ScratchFloat32Reg_ = {FloatRegisters::s30,
    243                                                     VFPRegister::Single};
    244 static constexpr FloatRegister ScratchDoubleReg_ = {FloatRegisters::d15,
    245                                                    VFPRegister::Double};
    246 static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
    247 static constexpr FloatRegister ScratchUIntReg = {FloatRegisters::d15,
    248                                                 VFPRegister::UInt};
    249 static constexpr FloatRegister ScratchIntReg = {FloatRegisters::d15,
    250                                                VFPRegister::Int};
    251 
// Do not reference ScratchFloat32Reg_ directly, use ScratchFloat32Scope
// instead. Asserts exclusive ownership of the float scratch register (s30,
// per the definition above) for the duration of the scope.
struct ScratchFloat32Scope : public AutoFloatRegisterScope {
  explicit ScratchFloat32Scope(MacroAssembler& masm)
      : AutoFloatRegisterScope(masm, ScratchFloat32Reg_) {}
};
    258 
// Do not reference ScratchDoubleReg_ directly, use ScratchDoubleScope instead.
// Asserts exclusive ownership of the double scratch register (d15, per the
// definition above) for the duration of the scope.
struct ScratchDoubleScope : public AutoFloatRegisterScope {
  explicit ScratchDoubleScope(MacroAssembler& masm)
      : AutoFloatRegisterScope(masm, ScratchDoubleReg_) {}
};
    264 
    265 // Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
    266 // JSReturnOperand).
    267 static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
    268 static constexpr Register RegExpMatcherStringReg = CallTempReg1;
    269 static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
    270 
    271 // Registers used by RegExpExecTest stub (do not use ReturnReg).
    272 static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
    273 static constexpr Register RegExpExecTestStringReg = CallTempReg1;
    274 
    275 // Registers used by RegExpSearcher stub (do not use ReturnReg).
    276 static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
    277 static constexpr Register RegExpSearcherStringReg = CallTempReg1;
    278 static constexpr Register RegExpSearcherLastIndexReg = CallTempReg2;
    279 
    280 static constexpr FloatRegister d0 = {FloatRegisters::d0, VFPRegister::Double};
    281 static constexpr FloatRegister d1 = {FloatRegisters::d1, VFPRegister::Double};
    282 static constexpr FloatRegister d2 = {FloatRegisters::d2, VFPRegister::Double};
    283 static constexpr FloatRegister d3 = {FloatRegisters::d3, VFPRegister::Double};
    284 static constexpr FloatRegister d4 = {FloatRegisters::d4, VFPRegister::Double};
    285 static constexpr FloatRegister d5 = {FloatRegisters::d5, VFPRegister::Double};
    286 static constexpr FloatRegister d6 = {FloatRegisters::d6, VFPRegister::Double};
    287 static constexpr FloatRegister d7 = {FloatRegisters::d7, VFPRegister::Double};
    288 static constexpr FloatRegister d8 = {FloatRegisters::d8, VFPRegister::Double};
    289 static constexpr FloatRegister d9 = {FloatRegisters::d9, VFPRegister::Double};
    290 static constexpr FloatRegister d10 = {FloatRegisters::d10, VFPRegister::Double};
    291 static constexpr FloatRegister d11 = {FloatRegisters::d11, VFPRegister::Double};
    292 static constexpr FloatRegister d12 = {FloatRegisters::d12, VFPRegister::Double};
    293 static constexpr FloatRegister d13 = {FloatRegisters::d13, VFPRegister::Double};
    294 static constexpr FloatRegister d14 = {FloatRegisters::d14, VFPRegister::Double};
    295 static constexpr FloatRegister d15 = {FloatRegisters::d15, VFPRegister::Double};
    296 
     297 // For maximal awesomeness, 8 should be sufficient. ldrd/strd (dual-register
    298 // load/store) operate in a single cycle when the address they are dealing with
    299 // is 8 byte aligned. Also, the ARM abi wants the stack to be 8 byte aligned at
    300 // function boundaries. I'm trying to make sure this is always true.
    301 static constexpr uint32_t ABIStackAlignment = 8;
    302 static constexpr uint32_t CodeAlignment = 8;
    303 static constexpr uint32_t JitStackAlignment = 8;
    304 
    305 static constexpr uint32_t JitStackValueAlignment =
    306    JitStackAlignment / sizeof(Value);
    307 static_assert(JitStackAlignment % sizeof(Value) == 0 &&
    308                  JitStackValueAlignment >= 1,
    309              "Stack alignment should be a non-zero multiple of sizeof(Value)");
    310 
    311 static constexpr uint32_t SimdMemoryAlignment = 8;
    312 
    313 static_assert(CodeAlignment % SimdMemoryAlignment == 0,
    314              "Code alignment should be larger than any of the alignments "
    315              "which are used for "
    316              "the constant sections of the code buffer.  Thus it should be "
    317              "larger than the "
    318              "alignment for SIMD constants.");
    319 
    320 static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
    321              "Stack alignment should be larger than any of the alignments "
    322              "which are used for "
    323              "spilled values.  Thus it should be larger than the alignment "
    324              "for SIMD accesses.");
    325 
    326 static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
    327 static const uint32_t WasmTrapInstructionLength = 4;
    328 
    329 // See comments in wasm::GenerateFunctionPrologue.  The difference between these
    330 // is the size of the largest callable prologue on the platform.
    331 static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
    332 
    333 static const Scale ScalePointer = TimesFour;
    334 
    335 class Instruction;
    336 class InstBranchImm;
    337 uint32_t RM(Register r);
    338 uint32_t RS(Register r);
    339 uint32_t RD(Register r);
    340 uint32_t RT(Register r);
    341 uint32_t RN(Register r);
    342 
    343 uint32_t maybeRD(Register r);
    344 uint32_t maybeRT(Register r);
    345 uint32_t maybeRN(Register r);
    346 
    347 Register toRN(Instruction i);
    348 Register toRM(Instruction i);
    349 Register toRD(Instruction i);
    350 Register toR(Instruction i);
    351 
    352 class VFPRegister;
    353 uint32_t VD(VFPRegister vr);
    354 uint32_t VN(VFPRegister vr);
    355 uint32_t VM(VFPRegister vr);
    356 
    357 // For being passed into the generic vfp instruction generator when there is an
    358 // instruction that only takes two registers.
    359 static constexpr VFPRegister NoVFPRegister(VFPRegister::Double, 0, false, true);
    360 
// A JS value tag mask wrapped as a 32-bit immediate operand.
struct ImmTag : public Imm32 {
  explicit ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
};
    364 
// The tag corresponding to a JSValueType, as an immediate operand.
struct ImmType : public ImmTag {
  explicit ImmType(JSValueType type) : ImmTag(JSVAL_TYPE_TO_TAG(type)) {}
};
    368 
// Load/store addressing modes, encoded in instruction bits 21 and 24.
enum Index {
  Offset = 0 << 21 | 1 << 24,    // Base + offset, base not updated.
  PreIndex = 1 << 21 | 1 << 24,  // Offset applied first, base written back.
  PostIndex = 0 << 21 | 0 << 24  // Base used as-is, then written back.
  // The docs were rather unclear on this. It sounds like
  // 1 << 21 | 0 << 24 encodes dtrt.
};
    376 
    377 enum IsImmOp2_ { IsImmOp2 = 1 << 25, IsNotImmOp2 = 0 << 25 };
    378 enum IsImmDTR_ { IsImmDTR = 0 << 25, IsNotImmDTR = 1 << 25 };
    379 // For the extra memory operations, ldrd, ldrsb, ldrh.
    380 enum IsImmEDTR_ { IsImmEDTR = 1 << 22, IsNotImmEDTR = 0 << 22 };
    381 
    382 enum ShiftType {
    383  LSL = 0,   // << 5
    384  LSR = 1,   // << 5
    385  ASR = 2,   // << 5
    386  ROR = 3,   // << 5
    387  RRX = ROR  // RRX is encoded as ROR with a 0 offset.
    388 };
    389 
    390 // Modes for STM/LDM. Names are the suffixes applied to the instruction.
    391 enum DTMMode {
    392  A = 0 << 24,  // empty / after
    393  B = 1 << 24,  // full / before
    394  D = 0 << 23,  // decrement
    395  I = 1 << 23,  // increment
    396  DA = D | A,
    397  DB = D | B,
    398  IA = I | A,
    399  IB = I | B
    400 };
    401 
    402 enum DTMWriteBack { WriteBack = 1 << 21, NoWriteBack = 0 << 21 };
    403 
    404 // Condition code updating mode.
    405 enum SBit {
    406  SetCC = 1 << 20,   // Set condition code.
    407  LeaveCC = 0 << 20  // Leave condition code unchanged.
    408 };
    409 
    410 enum LoadStore { IsLoad = 1 << 20, IsStore = 0 << 20 };
    411 
     412 // You almost never want to use this directly. Instead, you want to pass in a
    413 // signed constant, and let this bit be implicitly set for you. This is however,
    414 // necessary if we want a negative index.
    415 enum IsUp_ { IsUp = 1 << 23, IsDown = 0 << 23 };
    416 enum ALUOp {
    417  OpMov = 0xd << 21,
    418  OpMvn = 0xf << 21,
    419  OpAnd = 0x0 << 21,
    420  OpBic = 0xe << 21,
    421  OpEor = 0x1 << 21,
    422  OpOrr = 0xc << 21,
    423  OpAdc = 0x5 << 21,
    424  OpAdd = 0x4 << 21,
    425  OpSbc = 0x6 << 21,
    426  OpSub = 0x2 << 21,
    427  OpRsb = 0x3 << 21,
    428  OpRsc = 0x7 << 21,
    429  OpCmn = 0xb << 21,
    430  OpCmp = 0xa << 21,
    431  OpTeq = 0x9 << 21,
    432  OpTst = 0x8 << 21,
    433  OpInvalid = -1
    434 };
    435 
    436 enum MULOp {
    437  OpmMul = 0 << 21,
    438  OpmMla = 1 << 21,
    439  OpmUmaal = 2 << 21,
    440  OpmMls = 3 << 21,
    441  OpmUmull = 4 << 21,
    442  OpmUmlal = 5 << 21,
    443  OpmSmull = 6 << 21,
    444  OpmSmlal = 7 << 21
    445 };
    446 enum BranchTag {
    447  OpB = 0x0a000000,
    448  OpBMask = 0x0f000000,
    449  OpBDestMask = 0x00ffffff,
    450  OpBl = 0x0b000000,
    451  OpBlx = 0x012fff30,
    452  OpBx = 0x012fff10
    453 };
    454 
    455 // Just like ALUOp, but for the vfp instruction set.
    456 enum VFPOp {
    457  OpvMul = 0x2 << 20,
    458  OpvAdd = 0x3 << 20,
    459  OpvSub = 0x3 << 20 | 0x1 << 6,
    460  OpvDiv = 0x8 << 20,
    461  OpvMov = 0xB << 20 | 0x1 << 6,
    462  OpvAbs = 0xB << 20 | 0x3 << 6,
    463  OpvNeg = 0xB << 20 | 0x1 << 6 | 0x1 << 16,
    464  OpvSqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16,
    465  OpvCmp = 0xB << 20 | 0x1 << 6 | 0x4 << 16,
    466  OpvCmpz = 0xB << 20 | 0x1 << 6 | 0x5 << 16
    467 };
    468 
    469 // Negate the operation, AND negate the immediate that we were passed in.
    470 ALUOp ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm,
    471             Register* negDest);
    472 bool can_dbl(ALUOp op);
    473 bool condsAreSafe(ALUOp op);
    474 
    475 // If there is a variant of op that has a dest (think cmp/sub) return that
    476 // variant of it.
    477 ALUOp getDestVariant(ALUOp op);
    478 
    479 static constexpr ValueOperand JSReturnOperand{JSReturnReg_Type,
    480                                              JSReturnReg_Data};
    481 static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0);
    482 
    483 // All of these classes exist solely to shuffle data into the various operands.
    484 // For example Operand2 can be an imm8, a register-shifted-by-a-constant or a
    485 // register-shifted-by-a-register. We represent this in C++ by having a base
    486 // class Operand2, which just stores the 32 bits of data as they will be encoded
    487 // in the instruction. You cannot directly create an Operand2 since it is
    488 // tricky, and not entirely sane to do so. Instead, you create one of its child
    489 // classes, e.g. Imm8. Imm8's constructor takes a single integer argument. Imm8
    490 // will verify that its argument can be encoded as an ARM 12 bit imm8, encode it
    491 // using an Imm8data, and finally call its parent's (Operand2) constructor with
    492 // the Imm8data. The Operand2 constructor will then call the Imm8data's encode()
    493 // function to extract the raw bits from it.
    494 //
    495 // In the future, we should be able to extract data from the Operand2 by asking
    496 // it for its component Imm8data structures. The reason this is so horribly
    497 // round-about is we wanted to have Imm8 and RegisterShiftedRegister inherit
    498 // directly from Operand2 but have all of them take up only a single word of
    499 // storage. We also wanted to avoid passing around raw integers at all since
    500 // they are error prone.
    501 class Op2Reg;
    502 class O2RegImmShift;
    503 class O2RegRegShift;
    504 
    505 namespace datastore {
    506 
// Encoding of the register form of Operand2: a base register optionally
// shifted by an immediate or by another register.
class Reg {
  // The "second register".
  uint32_t rm_ : 4;
  // Do we get another register for shifting.
  uint32_t rrs_ : 1;
  uint32_t type_ : 2;  // ShiftType (LSL/LSR/ASR/ROR).
  // We'd like this to be a more sensible encoding, but that would need to be
  // a struct and that would not pack :(
  uint32_t shiftAmount_ : 5;

 protected:
  // Mark as a protected field to avoid unused private field warnings.
  uint32_t pad_ : 20;

 public:
  Reg(uint32_t rm, ShiftType type, uint32_t rsr, uint32_t shiftAmount)
      : rm_(rm), rrs_(rsr), type_(type), shiftAmount_(shiftAmount), pad_(0) {}
  // Reinterprets an Op2Reg's bits; relies on the static_assert (after Op2Reg,
  // below) that the two types have the same size.
  explicit Reg(const Op2Reg& op) { memcpy(this, &op, sizeof(*this)); }

  uint32_t shiftAmount() const { return shiftAmount_; }

  // Pack fields into their instruction positions: Rm at [3:0], the
  // register-shift flag at bit 4, shift type at [6:5], amount at [11:7].
  uint32_t encode() const {
    return rm_ | (rrs_ << 4) | (type_ << 5) | (shiftAmount_ << 7);
  }
};
    532 
// Op2 has a mode labelled "<imm8m>", which is arm's magical immediate encoding.
// Some instructions actually get 8 bits of data, which is called Imm8Data
// below. These should have edit distance > 1, but this is how it is for now.
class Imm8mData {
  uint32_t data_ : 8;  // The 8-bit payload.
  uint32_t rot_ : 4;   // The rotation field paired with the payload.

 protected:
  // Mark as a protected field to avoid unused private field warnings.
  uint32_t buff_ : 19;

 private:
  // Throw in an extra bit that will be 1 if we can't encode this properly.
  // if we can encode it properly, a simple "|" will still suffice to meld it
  // into the instruction.
  uint32_t invalid_ : 1;

 public:
  // Default constructor makes an invalid immediate.
  Imm8mData() : data_(0xff), rot_(0xf), buff_(0), invalid_(true) {}

  Imm8mData(uint32_t data, uint32_t rot)
      : data_(data), rot_(rot), buff_(0), invalid_(false) {
    // Catch arguments that do not fit their bitfields.
    MOZ_ASSERT(data == data_);
    MOZ_ASSERT(rot == rot_);
  }

  bool invalid() const { return invalid_; }

  // Payload at bits [7:0], rotation at bits [11:8].
  uint32_t encode() const {
    MOZ_ASSERT(!invalid_);
    return data_ | (rot_ << 8);
  };
};
    567 
// An 8-bit immediate split into two nibbles, encoded with the low nibble at
// bits [3:0] and the high nibble at bits [11:8] (the split-immediate form
// used by the extra load/store instructions).
class Imm8Data {
  uint32_t imm4L_ : 4;

 protected:
  // Mark as a protected field to avoid unused private field warnings.
  uint32_t pad_ : 4;

 private:
  uint32_t imm4H_ : 4;

 public:
  explicit Imm8Data(uint32_t imm) : imm4L_(imm & 0xf), imm4H_(imm >> 4) {
    MOZ_ASSERT(imm <= 0xff);  // Must fit in 8 bits.
  }

  uint32_t encode() const { return imm4L_ | (imm4H_ << 8); };
};
    585 
// VLDR/VSTR take an 8 bit offset, which is implicitly left shifted by 2.
class Imm8VFPOffData {
  uint32_t data_;

 public:
  explicit Imm8VFPOffData(uint32_t imm) : data_(imm) {
    MOZ_ASSERT((imm & ~(0xff)) == 0);  // Must fit in 8 bits.
  }
  uint32_t encode() const { return data_; };
};
    596 
// ARM can magically encode 256 very special immediates to be moved into a
// register.
struct Imm8VFPImmData {
  // This structure's members are public and it has no constructor to
  // initialize them, for a very special reason. Were this structure to
  // have a constructor, the initialization for DoubleEncoder's internal
  // table (see below) would require a rather large static constructor on
  // some of our supported compilers. The known solution to this is to mark
  // the constructor constexpr, but, again, some of our supported
  // compilers don't support constexpr! So we are reduced to public
  // members and eschewing a constructor in hopes that the initialization
  // of DoubleEncoder's table is correct.
  uint32_t imm4L : 4;
  uint32_t imm4H : 4;
  int32_t isInvalid : 24;  // Zero iff this entry is a valid encoding.

  // Low nibble at bits [3:0], high nibble at bits [19:16].
  uint32_t encode() const {
    // This assert is an attempt at ensuring that we don't create random
    // instances of this structure and then ask to encode() them.
    MOZ_ASSERT(isInvalid == 0);
    return imm4L | (imm4H << 16);
  };
};
    620 
// A plain 12-bit immediate, occupying instruction bits [11:0].
class Imm12Data {
  uint32_t data_ : 12;

 public:
  // The assert catches values that do not fit in 12 bits.
  explicit Imm12Data(uint32_t imm) : data_(imm) { MOZ_ASSERT(data_ == imm); }

  uint32_t encode() const { return data_; }
};
    629 
// The 5-bit immediate shift amount used by the register-shifted-by-constant
// form of Operand2 (see Op2Reg below).
class RIS {
  uint32_t shiftAmount_ : 5;

 public:
  explicit RIS(uint32_t imm) : shiftAmount_(imm) {
    MOZ_ASSERT(shiftAmount_ == imm);  // Must fit in 5 bits.
  }

  // Extract the shift amount already stored in a Reg.
  explicit RIS(Reg r) : shiftAmount_(r.shiftAmount()) {}

  uint32_t encode() const { return shiftAmount_; }
};
    642 
// Names the register holding the shift amount, for the
// register-shifted-by-register form of Operand2 (see Op2Reg below).
class RRS {
 protected:
  // Mark as a protected field to avoid unused private field warnings.
  uint32_t mustZero_ : 1;

 private:
  // The register that holds the shift amount.
  uint32_t rs_ : 4;

 public:
  explicit RRS(uint32_t rs) : rs_(rs) { MOZ_ASSERT(rs_ == rs); }

  // Rs sits one bit above the must-be-zero bit.
  uint32_t encode() const { return rs_ << 1; }
};
    657 
    658 }  // namespace datastore
    659 
    660 class MacroAssemblerARM;
    661 class Operand;
    662 
// Base class for the second operand of ALU instructions; see the block
// comment above. Stores the operand exactly as it will appear in the
// instruction, plus a separate invalid bit.
class Operand2 {
  friend class Operand;
  friend class MacroAssemblerARM;
  friend class InstALU;

  uint32_t oper_ : 31;
  uint32_t invalid_ : 1;

 protected:
  // Wrap an <imm8m> immediate; an invalid Imm8mData produces an invalid
  // Operand2 with all payload bits set.
  explicit Operand2(datastore::Imm8mData base)
      : oper_(base.invalid() ? -1 : (base.encode() | uint32_t(IsImmOp2))),
        invalid_(base.invalid()) {}

  // Wrap a (possibly shifted) register operand.
  explicit Operand2(datastore::Reg base)
      : oper_(base.encode() | uint32_t(IsNotImmOp2)), invalid_(false) {}

 private:
  explicit Operand2(uint32_t blob) : oper_(blob), invalid_(false) {}

 public:
  // The IsImmOp2 bit (bit 25) distinguishes the two forms.
  bool isO2Reg() const { return !(oper_ & IsImmOp2); }

  Op2Reg toOp2Reg() const;

  bool isImm8() const { return oper_ & IsImmOp2; }

  bool invalid() const { return invalid_; }

  uint32_t encode() const { return oper_; }
};
    693 
    694 class Imm8 : public Operand2 {
    695 public:
    696  explicit Imm8(uint32_t imm) : Operand2(EncodeImm(imm)) {}
    697 
    698  static datastore::Imm8mData EncodeImm(uint32_t imm) {
    699    // RotateLeft below may not be called with a shift of zero.
    700    if (imm <= 0xFF) {
    701      return datastore::Imm8mData(imm, 0);
    702    }
    703 
    704    // An encodable integer has a maximum of 8 contiguous set bits,
    705    // with an optional wrapped left rotation to even bit positions.
    706    for (int rot = 1; rot < 16; rot++) {
    707      uint32_t rotimm = mozilla::RotateLeft(imm, rot * 2);
    708      if (rotimm <= 0xFF) {
    709        return datastore::Imm8mData(rotimm, rot);
    710      }
    711    }
    712    return datastore::Imm8mData();
    713  }
    714 
    715  // Pair template?
    716  struct TwoImm8mData {
    717    datastore::Imm8mData fst_, snd_;
    718 
    719    TwoImm8mData() = default;
    720 
    721    TwoImm8mData(datastore::Imm8mData fst, datastore::Imm8mData snd)
    722        : fst_(fst), snd_(snd) {}
    723 
    724    datastore::Imm8mData fst() const { return fst_; }
    725    datastore::Imm8mData snd() const { return snd_; }
    726  };
    727 
    728  static TwoImm8mData EncodeTwoImms(uint32_t);
    729 };
    730 
// An Operand2 naming a register, shifted either by an immediate amount or by
// another register (e.g. "r1, lsl #2" or "r1, lsl r2").
class Op2Reg : public Operand2 {
 public:
  // Register shifted by a constant (RIS: register/immediate shift).
  explicit Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm)
      : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.encode())) {}

  // Register shifted by another register (RRS: register/register shift).
  explicit Op2Reg(Register rm, ShiftType type, datastore::RRS shiftReg)
      : Operand2(datastore::Reg(rm.code(), type, 1, shiftReg.encode())) {}
};

static_assert(sizeof(Op2Reg) == sizeof(datastore::Reg),
              "datastore::Reg(const Op2Reg&) constructor relies on Reg/Op2Reg "
              "having same size");
    743 
    744 class O2RegImmShift : public Op2Reg {
    745 public:
    746  explicit O2RegImmShift(Register rn, ShiftType type, uint32_t shift)
    747      : Op2Reg(rn, type, datastore::RIS(shift)) {}
    748 };
    749 
    750 class O2RegRegShift : public Op2Reg {
    751 public:
    752  explicit O2RegRegShift(Register rn, ShiftType type, Register rs)
    753      : Op2Reg(rn, type, datastore::RRS(rs.code())) {}
    754 };
    755 
// Convenience builders for the shifted-register Operand2 forms.
// O2Reg(r) produces a plain register operand; the rest apply the named shift
// by a constant amount.
O2RegImmShift O2Reg(Register r);
O2RegImmShift lsl(Register r, int amt);
O2RegImmShift lsr(Register r, int amt);
O2RegImmShift asr(Register r, int amt);
O2RegImmShift rol(Register r, int amt);
O2RegImmShift ror(Register r, int amt);

// Register-specified shift amounts. (Note: no register-shifted rol overload
// is declared here.)
O2RegRegShift lsl(Register r, Register amt);
O2RegRegShift lsr(Register r, Register amt);
O2RegRegShift asr(Register r, Register amt);
O2RegRegShift ror(Register r, Register amt);
    767 
    768 // An offset from a register to be used for ldr/str. This should include the
    769 // sign bit, since ARM has "signed-magnitude" offsets. That is it encodes an
    770 // unsigned offset, then the instruction specifies if the offset is positive or
    771 // negative. The +/- bit is necessary if the instruction set wants to be able to
    772 // have a negative register offset e.g. ldr pc, [r1,-r2];
    773 class DtrOff {
    774  uint32_t data_;
    775 
    776 protected:
    777  explicit DtrOff(datastore::Imm12Data immdata, IsUp_ iu)
    778      : data_(immdata.encode() | uint32_t(IsImmDTR) | uint32_t(iu)) {}
    779 
    780  explicit DtrOff(datastore::Reg reg, IsUp_ iu = IsUp)
    781      : data_(reg.encode() | uint32_t(IsNotImmDTR) | iu) {}
    782 
    783 public:
    784  uint32_t encode() const { return data_; }
    785 };
    786 
    787 class DtrOffImm : public DtrOff {
    788 public:
    789  explicit DtrOffImm(int32_t imm)
    790      : DtrOff(datastore::Imm12Data(mozilla::Abs(imm)),
    791               imm >= 0 ? IsUp : IsDown) {
    792    MOZ_ASSERT(mozilla::Abs(imm) < 4096);
    793  }
    794 };
    795 
    796 class DtrOffReg : public DtrOff {
    797  // These are designed to be called by a constructor of a subclass.
    798  // Constructing the necessary RIS/RRS structures is annoying.
    799 
    800 protected:
    801  explicit DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm,
    802                     IsUp_ iu = IsUp)
    803      : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode()), iu) {}
    804 
    805  explicit DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg,
    806                     IsUp_ iu = IsUp)
    807      : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.encode()), iu) {}
    808 };
    809 
    810 class DtrRegImmShift : public DtrOffReg {
    811 public:
    812  explicit DtrRegImmShift(Register rn, ShiftType type, uint32_t shift,
    813                          IsUp_ iu = IsUp)
    814      : DtrOffReg(rn, type, datastore::RIS(shift), iu) {}
    815 };
    816 
    817 class DtrRegRegShift : public DtrOffReg {
    818 public:
    819  explicit DtrRegRegShift(Register rn, ShiftType type, Register rs,
    820                          IsUp_ iu = IsUp)
    821      : DtrOffReg(rn, type, datastore::RRS(rs.code()), iu) {}
    822 };
    823 
    824 // We will frequently want to bundle a register with its offset so that we have
    825 // an "operand" to a load instruction.
    826 class DTRAddr {
    827  friend class Operand;
    828 
    829  uint32_t data_;
    830 
    831 public:
    832  explicit DTRAddr(Register reg, DtrOff dtr)
    833      : data_(dtr.encode() | (reg.code() << 16)) {}
    834 
    835  uint32_t encode() const { return data_; }
    836 
    837  Register getBase() const { return Register::FromCode((data_ >> 16) & 0xf); }
    838 };
    839 
    840 // Offsets for the extended data transfer instructions:
    841 // ldrsh, ldrd, ldrsb, etc.
    842 class EDtrOff {
    843  uint32_t data_;
    844 
    845 protected:
    846  explicit EDtrOff(datastore::Imm8Data imm8, IsUp_ iu = IsUp)
    847      : data_(imm8.encode() | IsImmEDTR | uint32_t(iu)) {}
    848 
    849  explicit EDtrOff(Register rm, IsUp_ iu = IsUp)
    850      : data_(rm.code() | IsNotImmEDTR | iu) {}
    851 
    852 public:
    853  uint32_t encode() const { return data_; }
    854 };
    855 
    856 class EDtrOffImm : public EDtrOff {
    857 public:
    858  explicit EDtrOffImm(int32_t imm)
    859      : EDtrOff(datastore::Imm8Data(mozilla::Abs(imm)),
    860                (imm >= 0) ? IsUp : IsDown) {
    861    MOZ_ASSERT(mozilla::Abs(imm) < 256);
    862  }
    863 };
    864 
    865 // This is the most-derived class, since the extended data transfer instructions
    866 // don't support any sort of modifying the "index" operand.
    867 class EDtrOffReg : public EDtrOff {
    868 public:
    869  explicit EDtrOffReg(Register rm) : EDtrOff(rm) {}
    870 };
    871 
    872 class EDtrAddr {
    873  uint32_t data_;
    874 
    875 public:
    876  explicit EDtrAddr(Register r, EDtrOff off) : data_(RN(r) | off.encode()) {}
    877 
    878  uint32_t encode() const { return data_; }
    879 #ifdef DEBUG
    880  Register maybeOffsetRegister() const {
    881    if (data_ & IsImmEDTR) {
    882      return InvalidReg;
    883    }
    884    return Register::FromCode(data_ & 0xf);
    885  }
    886 #endif
    887 };
    888 
    889 class VFPOff {
    890  uint32_t data_;
    891 
    892 protected:
    893  explicit VFPOff(datastore::Imm8VFPOffData imm, IsUp_ isup)
    894      : data_(imm.encode() | uint32_t(isup)) {}
    895 
    896 public:
    897  uint32_t encode() const { return data_; }
    898 };
    899 
    900 class VFPOffImm : public VFPOff {
    901 public:
    902  explicit VFPOffImm(int32_t imm)
    903      : VFPOff(datastore::Imm8VFPOffData(mozilla::Abs(imm) / 4),
    904               imm < 0 ? IsDown : IsUp) {
    905    MOZ_ASSERT(mozilla::Abs(imm) <= 255 * 4);
    906  }
    907 };
    908 
    909 class VFPAddr {
    910  friend class Operand;
    911 
    912  uint32_t data_;
    913 
    914 public:
    915  explicit VFPAddr(Register base, VFPOff off)
    916      : data_(RN(base) | off.encode()) {}
    917 
    918  uint32_t encode() const { return data_; }
    919 };
    920 
// A VFP immediate operand, computed from the top word of a double's bit
// pattern (constructor defined out of line). Per isValid(), a data_ value
// of ~0 marks "no encoding exists" for the requested constant.
class VFPImm {
  uint32_t data_;

 public:
  explicit VFPImm(uint32_t topWordOfDouble);

  // A ready-made instance named One — presumably the encoding of 1.0;
  // confirm against its definition.
  static const VFPImm One;

  uint32_t encode() const { return data_; }
  bool isValid() const { return data_ != (~0U); }
};
    932 
// A BOffImm is an immediate that is used for branches. Namely, it is the
// offset that will be encoded in the branch instruction. This is the only
// sane way of constructing a branch. The stored value is the byte offset
// minus the implicit pc+8 bias, scaled to words, in the low 24 bits.
class BOffImm {
  friend class InstBranchImm;

  uint32_t data_;

 public:
  // |offset| is a byte offset (including the pc+8 bias); it must be
  // word-aligned and in range, and is stored as a signed 24-bit word count.
  explicit BOffImm(int offset) : data_((offset - 8) >> 2 & 0x00ffffff) {
    MOZ_ASSERT((offset & 0x3) == 0);
    if (!IsInRange(offset)) {
      MOZ_CRASH("BOffImm offset out of range");
    }
  }

  // Construct the invalid sentinel value.
  explicit BOffImm() : data_(INVALID) {}

 private:
  explicit BOffImm(const Instruction& inst);

 public:
  static const uint32_t INVALID = 0x00800000;

  uint32_t encode() const { return data_; }
  // Inverse of the constructor: shift the 24-bit field to the top, then
  // arithmetic-shift back down by 6 (sign-extending and scaling by 4), and
  // re-apply the +8 bias.
  int32_t decode() const { return ((int32_t(data_) << 8) >> 6) + 8; }

  // The biased offset must fit the signed 26-bit byte range implied by a
  // 24-bit word field: [-2^25, 2^25 - 4].
  static bool IsInRange(int offset) {
    if ((offset - 8) < -33554432) {
      return false;
    }
    if ((offset - 8) > 33554428) {
      return false;
    }
    return true;
  }

  bool isInvalid() const { return data_ == INVALID; }
  Instruction* getDest(Instruction* src) const;
};
    973 
// A 16-bit immediate as used by movw/movt, split into the instruction's
// low 12-bit and high 4-bit fields. Constructors are defined out of line.
class Imm16 {
  uint32_t lower_ : 12;

 protected:
  // Mark as a protected field to avoid unused private field warnings.
  uint32_t pad_ : 4;

 private:
  uint32_t upper_ : 4;
  uint32_t invalid_ : 12;

 public:
  explicit Imm16();
  explicit Imm16(uint32_t imm);
  explicit Imm16(Instruction& inst);

  // Instruction-encoding form: the upper nibble goes in bits 16..19, the
  // low 12 bits in bits 0..11.
  uint32_t encode() const { return lower_ | (upper_ << 16); }
  // The plain 16-bit value: upper nibble directly above the low 12 bits.
  uint32_t decode() const { return lower_ | (upper_ << 12); }

  bool isInvalid() const { return invalid_; }
};
    995 
    996 // I would preffer that these do not exist, since there are essentially no
    997 // instructions that would ever take more than one of these, however, the MIR
    998 // wants to only have one type of arguments to functions, so bugger.
    999 class Operand {
   1000  // The encoding of registers is the same for OP2, DTR and EDTR yet the type
   1001  // system doesn't let us express this, so choices must be made.
   1002 public:
   1003  enum class Tag : uint8_t { OP2, MEM, FOP };
   1004 
   1005 private:
   1006  uint32_t tag_ : 8;
   1007  uint32_t reg_ : 5;
   1008  int32_t offset_;
   1009 
   1010 protected:
   1011  Operand(Tag tag, uint32_t regCode, int32_t offset)
   1012      : tag_(static_cast<uint32_t>(tag)), reg_(regCode), offset_(offset) {}
   1013 
   1014 public:
   1015  explicit Operand(Register reg) : Operand(Tag::OP2, reg.code(), 0) {}
   1016 
   1017  explicit Operand(FloatRegister freg) : Operand(Tag::FOP, freg.code(), 0) {}
   1018 
   1019  explicit Operand(Register base, Imm32 off)
   1020      : Operand(Tag::MEM, base.code(), off.value) {}
   1021 
   1022  explicit Operand(Register base, int32_t off)
   1023      : Operand(Tag::MEM, base.code(), off) {}
   1024 
   1025  explicit Operand(const Address& addr)
   1026      : Operand(Tag::MEM, addr.base.code(), addr.offset) {}
   1027 
   1028 public:
   1029  Tag tag() const { return static_cast<Tag>(tag_); }
   1030 
   1031  Operand2 toOp2() const {
   1032    MOZ_ASSERT(tag() == Tag::OP2);
   1033    return O2Reg(Register::FromCode(reg_));
   1034  }
   1035 
   1036  Register toReg() const {
   1037    MOZ_ASSERT(tag() == Tag::OP2);
   1038    return Register::FromCode(reg_);
   1039  }
   1040 
   1041  Address toAddress() const {
   1042    MOZ_ASSERT(tag() == Tag::MEM);
   1043    return Address(Register::FromCode(reg_), offset_);
   1044  }
   1045  int32_t disp() const {
   1046    MOZ_ASSERT(tag() == Tag::MEM);
   1047    return offset_;
   1048  }
   1049 
   1050  int32_t base() const {
   1051    MOZ_ASSERT(tag() == Tag::MEM);
   1052    return reg_;
   1053  }
   1054  Register baseReg() const {
   1055    MOZ_ASSERT(tag() == Tag::MEM);
   1056    return Register::FromCode(reg_);
   1057  }
   1058  DTRAddr toDTRAddr() const {
   1059    MOZ_ASSERT(tag() == Tag::MEM);
   1060    return DTRAddr(baseReg(), DtrOffImm(offset_));
   1061  }
   1062  VFPAddr toVFPAddr() const {
   1063    MOZ_ASSERT(tag() == Tag::MEM);
   1064    return VFPAddr(baseReg(), VFPOffImm(offset_));
   1065  }
   1066 };
   1067 
// Iterates over a stream of Instructions, transparently stepping over
// automatically-inserted instructions (see maybeSkipAutomaticInstructions,
// defined out of line).
class InstructionIterator {
 private:
  Instruction* inst_;

 public:
  // Starts at |inst|, immediately skipping any automatic instructions so
  // cur() points at a real one.
  explicit InstructionIterator(Instruction* inst) : inst_(inst) {
    maybeSkipAutomaticInstructions();
  }

  // Advances to the next intentionally-inserted instruction.
  Instruction* next();

  // Advances past any automatically-inserted instructions.
  Instruction* maybeSkipAutomaticInstructions();

  // The instruction currently pointed at.
  Instruction* cur() const { return inst_; }

 protected:
  // Advances past the given number of instruction-length bytes.
  inline void advanceRaw(ptrdiff_t instructions = 1);
};
   1089 
// Forward declaration so the buffer alias below can name its assembler.
class Assembler;
// The constant-pool-aware instruction buffer used by the ARM Assembler.
using ARMBuffer =
    js::jit::AssemblerBufferWithConstantPools<1024, 4, Instruction, Assembler>;
   1093 
   1094 class Assembler : public AssemblerShared {
   1095 public:
   1096  // ARM conditional constants:
   1097  enum ARMCondition : uint32_t {
   1098    EQ = 0x00000000,  // Zero
   1099    NE = 0x10000000,  // Non-zero
   1100    CS = 0x20000000,
   1101    CC = 0x30000000,
   1102    MI = 0x40000000,
   1103    PL = 0x50000000,
   1104    VS = 0x60000000,
   1105    VC = 0x70000000,
   1106    HI = 0x80000000,
   1107    LS = 0x90000000,
   1108    GE = 0xa0000000,
   1109    LT = 0xb0000000,
   1110    GT = 0xc0000000,
   1111    LE = 0xd0000000,
   1112    AL = 0xe0000000
   1113  };
   1114 
   1115  enum Condition : uint32_t {
   1116    Equal = EQ,
   1117    NotEqual = NE,
   1118    Above = HI,
   1119    AboveOrEqual = CS,
   1120    Below = CC,
   1121    BelowOrEqual = LS,
   1122    GreaterThan = GT,
   1123    GreaterThanOrEqual = GE,
   1124    LessThan = LT,
   1125    LessThanOrEqual = LE,
   1126    Overflow = VS,
   1127    CarrySet = CS,
   1128    CarryClear = CC,
   1129    Signed = MI,
   1130    NotSigned = PL,
   1131    Zero = EQ,
   1132    NonZero = NE,
   1133    Always = AL,
   1134 
   1135    VFP_NotEqualOrUnordered = NE,
   1136    VFP_Equal = EQ,
   1137    VFP_Unordered = VS,
   1138    VFP_NotUnordered = VC,
   1139    VFP_GreaterThanOrEqualOrUnordered = CS,
   1140    VFP_GreaterThanOrEqual = GE,
   1141    VFP_GreaterThanOrUnordered = HI,
   1142    VFP_GreaterThan = GT,
   1143    VFP_LessThanOrEqualOrUnordered = LE,
   1144    VFP_LessThanOrEqual = LS,
   1145    VFP_LessThanOrUnordered = LT,
   1146    VFP_LessThan = CC  // MI is valid too.
   1147  };
   1148 
   1149  // Bit set when a DoubleCondition does not map to a single ARM condition.
   1150  // The macro assembler has to special-case these conditions, or else
   1151  // ConditionFromDoubleCondition will complain.
   1152  static const int DoubleConditionBitSpecial = 0x1;
   1153 
   1154  enum DoubleCondition : uint32_t {
   1155    // These conditions will only evaluate to true if the comparison is
   1156    // ordered - i.e. neither operand is NaN.
   1157    DoubleOrdered = VFP_NotUnordered,
   1158    DoubleEqual = VFP_Equal,
   1159    DoubleNotEqual = VFP_NotEqualOrUnordered | DoubleConditionBitSpecial,
   1160    DoubleGreaterThan = VFP_GreaterThan,
   1161    DoubleGreaterThanOrEqual = VFP_GreaterThanOrEqual,
   1162    DoubleLessThan = VFP_LessThan,
   1163    DoubleLessThanOrEqual = VFP_LessThanOrEqual,
   1164    // If either operand is NaN, these conditions always evaluate to true.
   1165    DoubleUnordered = VFP_Unordered,
   1166    DoubleEqualOrUnordered = VFP_Equal | DoubleConditionBitSpecial,
   1167    DoubleNotEqualOrUnordered = VFP_NotEqualOrUnordered,
   1168    DoubleGreaterThanOrUnordered = VFP_GreaterThanOrUnordered,
   1169    DoubleGreaterThanOrEqualOrUnordered = VFP_GreaterThanOrEqualOrUnordered,
   1170    DoubleLessThanOrUnordered = VFP_LessThanOrUnordered,
   1171    DoubleLessThanOrEqualOrUnordered = VFP_LessThanOrEqualOrUnordered
   1172  };
   1173 
   1174  Condition getCondition(uint32_t inst) {
   1175    return (Condition)(0xf0000000 & inst);
   1176  }
   1177  static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
   1178    MOZ_ASSERT(!(cond & DoubleConditionBitSpecial));
   1179    return static_cast<Condition>(cond);
   1180  }
   1181 
   1182  enum BarrierOption {
   1183    BarrierSY = 15,  // Full system barrier
   1184    BarrierST = 14   // StoreStore barrier
   1185  };
   1186 
   1187  // This should be protected, but since CodeGenerator wants to use it, it
   1188  // needs to go out here :(
   1189 
   1190  BufferOffset nextOffset() { return m_buffer.nextOffset(); }
   1191 
   1192 protected:
   1193  // Shim around AssemblerBufferWithConstantPools::allocEntry.
   1194  BufferOffset allocLiteralLoadEntry(size_t numInst, unsigned numPoolEntries,
   1195                                     PoolHintPun& php, uint8_t* data,
   1196                                     const LiteralDoc& doc = LiteralDoc(),
   1197                                     ARMBuffer::PoolEntry* pe = nullptr,
   1198                                     bool loadToPC = false);
   1199 
   1200  Instruction* editSrc(BufferOffset bo) { return m_buffer.getInst(bo); }
   1201 
   1202 #ifdef JS_DISASM_ARM
   1203  using DisasmBuffer =
   1204      disasm::EmbeddedVector<char, disasm::ReasonableBufferSize>;
   1205 
   1206  static void disassembleInstruction(const Instruction* i,
   1207                                     DisasmBuffer& buffer);
   1208 
   1209  void initDisassembler();
   1210  void finishDisassembler();
   1211  void spew(Instruction* i);
   1212  void spewBranch(Instruction* i, const LabelDoc& target);
   1213  void spewLiteralLoad(PoolHintPun& php, bool loadToPC, const Instruction* offs,
   1214                       const LiteralDoc& doc);
   1215 #endif
   1216 
   1217 public:
   1218  void resetCounter();
   1219  static uint32_t NopFill;
   1220  static uint32_t GetNopFill();
   1221  static uint32_t AsmPoolMaxOffset;
   1222  static uint32_t GetPoolMaxOffset();
   1223 
   1224 protected:
   1225  // Structure for fixing up pc-relative loads/jumps when a the machine code
   1226  // gets moved (executable copy, gc, etc.).
   1227  class RelativePatch {
   1228    void* target_;
   1229    RelocationKind kind_;
   1230 
   1231   public:
   1232    RelativePatch(void* target, RelocationKind kind)
   1233        : target_(target), kind_(kind) {}
   1234    void* target() const { return target_; }
   1235    RelocationKind kind() const { return kind_; }
   1236  };
   1237 
   1238  // TODO: this should actually be a pool-like object. It is currently a big
   1239  // hack, and probably shouldn't exist.
   1240  js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
   1241 
   1242  CompactBufferWriter jumpRelocations_;
   1243  CompactBufferWriter dataRelocations_;
   1244 
   1245  ARMBuffer m_buffer;
   1246 
   1247 #ifdef JS_DISASM_ARM
   1248  DisassemblerSpew spew_;
   1249 #endif
   1250 
   1251 public:
   1252  // For the alignment fill use NOP: 0x0320f000 or (Always | InstNOP::NopInst).
   1253  // For the nopFill use a branch to the next instruction: 0xeaffffff.
   1254  Assembler()
   1255      : m_buffer(1, 1, 8, GetPoolMaxOffset(), 8, 0xe320f000, 0xeaffffff,
   1256                 GetNopFill()),
   1257        isFinished(false),
   1258        dtmActive(false),
   1259        dtmCond(Always) {
   1260 #ifdef JS_DISASM_ARM
   1261    initDisassembler();
   1262 #endif
   1263  }
   1264 
   1265  ~Assembler() {
   1266 #ifdef JS_DISASM_ARM
   1267    finishDisassembler();
   1268 #endif
   1269  }
   1270 
   1271  void setUnlimitedBuffer() { m_buffer.setUnlimited(); }
   1272 
   1273  static Condition InvertCondition(Condition cond);
   1274  static Condition UnsignedCondition(Condition cond);
   1275  static Condition ConditionWithoutEqual(Condition cond);
   1276 
   1277  static DoubleCondition InvertCondition(DoubleCondition cond);
   1278 
   1279  void writeDataRelocation(BufferOffset offset, ImmGCPtr ptr) {
   1280    // Raw GC pointer relocations and Value relocations both end up in
   1281    // Assembler::TraceDataRelocations.
   1282    if (ptr.value) {
   1283      if (gc::IsInsideNursery(ptr.value)) {
   1284        embedsNurseryPointers_ = true;
   1285      }
   1286      dataRelocations_.writeUnsigned(offset.getOffset());
   1287    }
   1288  }
   1289 
   1290  enum RelocBranchStyle { B_MOVWT, B_LDR_BX, B_LDR, B_MOVW_ADD };
   1291 
   1292  enum RelocStyle { L_MOVWT, L_LDR };
   1293 
   1294 public:
   1295  // Given the start of a Control Flow sequence, grab the value that is
   1296  // finally branched to given the start of a function that loads an address
   1297  // into a register get the address that ends up in the register.
   1298  template <class Iter>
   1299  static const uint32_t* GetCF32Target(Iter* iter);
   1300 
   1301  static uintptr_t GetPointer(uint8_t*);
   1302  template <class Iter>
   1303  static const uint32_t* GetPtr32Target(Iter iter, Register* dest = nullptr,
   1304                                        RelocStyle* rs = nullptr);
   1305 
   1306  bool oom() const;
   1307 
   1308  void setPrinter(Sprinter* sp) {
   1309 #ifdef JS_DISASM_ARM
   1310    spew_.setPrinter(sp);
   1311 #endif
   1312  }
   1313 
   1314  Register getStackPointer() const { return StackPointer; }
   1315 
   1316 private:
   1317  bool isFinished;
   1318 
   1319 protected:
   1320  LabelDoc refLabel(const Label* label) {
   1321 #ifdef JS_DISASM_ARM
   1322    return spew_.refLabel(label);
   1323 #else
   1324    return LabelDoc();
   1325 #endif
   1326  }
   1327 
   1328 public:
   1329  void finish();
   1330  bool appendRawCode(const uint8_t* code, size_t numBytes);
   1331  bool reserve(size_t size);
   1332  bool swapBuffer(wasm::Bytes& bytes);
   1333  void copyJumpRelocationTable(uint8_t* dest);
   1334  void copyDataRelocationTable(uint8_t* dest);
   1335 
   1336  // Size of the instruction stream, in bytes, after pools are flushed.
   1337  size_t size() const;
   1338  // Size of the jump relocation table, in bytes.
   1339  size_t jumpRelocationTableBytes() const;
   1340  size_t dataRelocationTableBytes() const;
   1341 
   1342  // Size of the data table, in bytes.
   1343  size_t bytesNeeded() const;
   1344 
   1345  // Write a single instruction into the instruction stream.  Very hot,
   1346  // inlined for performance
   1347  MOZ_ALWAYS_INLINE BufferOffset writeInst(uint32_t x) {
   1348    MOZ_ASSERT(hasCreator());
   1349    BufferOffset offs = m_buffer.putInt(x);
   1350 #ifdef JS_DISASM_ARM
   1351    spew(m_buffer.getInstOrNull(offs));
   1352 #endif
   1353    return offs;
   1354  }
   1355 
   1356  // As above, but also mark the instruction as a branch.  Very hot, inlined
   1357  // for performance
   1358  MOZ_ALWAYS_INLINE BufferOffset
   1359  writeBranchInst(uint32_t x, const LabelDoc& documentation) {
   1360    BufferOffset offs = m_buffer.putInt(x);
   1361 #ifdef JS_DISASM_ARM
   1362    spewBranch(m_buffer.getInstOrNull(offs), documentation);
   1363 #endif
   1364    return offs;
   1365  }
   1366 
   1367  // Write a placeholder NOP for a branch into the instruction stream
   1368  // (in order to adjust assembler addresses and mark it as a branch), it will
   1369  // be overwritten subsequently.
   1370  BufferOffset allocBranchInst();
   1371 
   1372  // A static variant for the cases where we don't want to have an assembler
   1373  // object.
   1374  static void WriteInstStatic(uint32_t x, uint32_t* dest);
   1375 
   1376 public:
   1377  void writeCodePointer(CodeLabel* label);
   1378 
   1379  void haltingAlign(int alignment);
   1380  void nopAlign(int alignment);
   1381  BufferOffset as_nop();
   1382  BufferOffset as_alu(Register dest, Register src1, Operand2 op2, ALUOp op,
   1383                      SBit s = LeaveCC, Condition c = Always);
   1384  BufferOffset as_mov(Register dest, Operand2 op2, SBit s = LeaveCC,
   1385                      Condition c = Always);
   1386  BufferOffset as_mvn(Register dest, Operand2 op2, SBit s = LeaveCC,
   1387                      Condition c = Always);
   1388 
   1389  static void as_alu_patch(Register dest, Register src1, Operand2 op2, ALUOp op,
   1390                           SBit s, Condition c, uint32_t* pos);
   1391  static void as_mov_patch(Register dest, Operand2 op2, SBit s, Condition c,
   1392                           uint32_t* pos);
   1393 
   1394  // Logical operations:
   1395  BufferOffset as_and(Register dest, Register src1, Operand2 op2,
   1396                      SBit s = LeaveCC, Condition c = Always);
   1397  BufferOffset as_bic(Register dest, Register src1, Operand2 op2,
   1398                      SBit s = LeaveCC, Condition c = Always);
   1399  BufferOffset as_eor(Register dest, Register src1, Operand2 op2,
   1400                      SBit s = LeaveCC, Condition c = Always);
   1401  BufferOffset as_orr(Register dest, Register src1, Operand2 op2,
   1402                      SBit s = LeaveCC, Condition c = Always);
   1403  // Reverse byte operations:
   1404  BufferOffset as_rev(Register dest, Register src, Condition c = Always);
   1405  BufferOffset as_rev16(Register dest, Register src, Condition c = Always);
   1406  BufferOffset as_revsh(Register dest, Register src, Condition c = Always);
   1407  // Mathematical operations:
   1408  BufferOffset as_adc(Register dest, Register src1, Operand2 op2,
   1409                      SBit s = LeaveCC, Condition c = Always);
   1410  BufferOffset as_add(Register dest, Register src1, Operand2 op2,
   1411                      SBit s = LeaveCC, Condition c = Always);
   1412  BufferOffset as_sbc(Register dest, Register src1, Operand2 op2,
   1413                      SBit s = LeaveCC, Condition c = Always);
   1414  BufferOffset as_sub(Register dest, Register src1, Operand2 op2,
   1415                      SBit s = LeaveCC, Condition c = Always);
   1416  BufferOffset as_rsb(Register dest, Register src1, Operand2 op2,
   1417                      SBit s = LeaveCC, Condition c = Always);
   1418  BufferOffset as_rsc(Register dest, Register src1, Operand2 op2,
   1419                      SBit s = LeaveCC, Condition c = Always);
   1420  // Test operations:
   1421  BufferOffset as_cmn(Register src1, Operand2 op2, Condition c = Always);
   1422  BufferOffset as_cmp(Register src1, Operand2 op2, Condition c = Always);
   1423  BufferOffset as_teq(Register src1, Operand2 op2, Condition c = Always);
   1424  BufferOffset as_tst(Register src1, Operand2 op2, Condition c = Always);
   1425 
   1426  // Sign extension operations:
   1427  BufferOffset as_sxtb(Register dest, Register src, int rotate,
   1428                       Condition c = Always);
   1429  BufferOffset as_sxth(Register dest, Register src, int rotate,
   1430                       Condition c = Always);
   1431  BufferOffset as_uxtb(Register dest, Register src, int rotate,
   1432                       Condition c = Always);
   1433  BufferOffset as_uxth(Register dest, Register src, int rotate,
   1434                       Condition c = Always);
   1435 
   1436  // Not quite ALU worthy, but useful none the less: These also have the issue
   1437  // of these being formatted completly differently from the standard ALU
   1438  // operations.
   1439  BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always);
   1440  BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always);
   1441 
   1442  static void as_movw_patch(Register dest, Imm16 imm, Condition c,
   1443                            Instruction* pos);
   1444  static void as_movt_patch(Register dest, Imm16 imm, Condition c,
   1445                            Instruction* pos);
   1446 
   1447  BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn,
   1448                         MULOp op, SBit s, Condition c = Always);
   1449  BufferOffset as_mul(Register dest, Register src1, Register src2,
   1450                      SBit s = LeaveCC, Condition c = Always);
   1451  BufferOffset as_mla(Register dest, Register acc, Register src1, Register src2,
   1452                      SBit s = LeaveCC, Condition c = Always);
   1453  BufferOffset as_umaal(Register dest1, Register dest2, Register src1,
   1454                        Register src2, Condition c = Always);
   1455  BufferOffset as_mls(Register dest, Register acc, Register src1, Register src2,
   1456                      Condition c = Always);
   1457  BufferOffset as_umull(Register dest1, Register dest2, Register src1,
   1458                        Register src2, SBit s = LeaveCC, Condition c = Always);
   1459  BufferOffset as_umlal(Register dest1, Register dest2, Register src1,
   1460                        Register src2, SBit s = LeaveCC, Condition c = Always);
   1461  BufferOffset as_smull(Register dest1, Register dest2, Register src1,
   1462                        Register src2, SBit s = LeaveCC, Condition c = Always);
   1463  BufferOffset as_smlal(Register dest1, Register dest2, Register src1,
   1464                        Register src2, SBit s = LeaveCC, Condition c = Always);
   1465 
   1466  BufferOffset as_sdiv(Register dest, Register num, Register div,
   1467                       Condition c = Always);
   1468  BufferOffset as_udiv(Register dest, Register num, Register div,
   1469                       Condition c = Always);
   1470  BufferOffset as_clz(Register dest, Register src, Condition c = Always);
   1471 
   1472  // Data transfer instructions: ldr, str, ldrb, strb.
   1473  // Using an int to differentiate between 8 bits and 32 bits is overkill.
   1474  BufferOffset as_dtr(LoadStore ls, int size, Index mode, Register rt,
   1475                      DTRAddr addr, Condition c = Always);
   1476 
   1477  static void as_dtr_patch(LoadStore ls, int size, Index mode, Register rt,
   1478                           DTRAddr addr, Condition c, uint32_t* dest);
   1479 
   1480  // Handles all of the other integral data transferring functions:
   1481  // ldrsb, ldrsh, ldrd, etc. The size is given in bits.
   1482  BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
   1483                         Register rt, EDtrAddr addr, Condition c = Always);
   1484 
  BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask, DTMMode mode,
                      DTMWriteBack wb, Condition c = Always);

  // Overwrite a pool entry with new data.
  static void WritePoolEntry(Instruction* addr, Condition c, uint32_t data);

  // Load a 32 bit immediate from a pool into a register.
  BufferOffset as_Imm32Pool(Register dest, uint32_t value,
                            Condition c = Always);

  // Load a 64 bit floating point immediate from a pool into a register.
  BufferOffset as_FImm64Pool(VFPRegister dest, double value,
                             Condition c = Always);
  // Load a 32 bit floating point immediate from a pool into a register.
  BufferOffset as_FImm32Pool(VFPRegister dest, float value,
                             Condition c = Always);

  // Atomic instructions: ldrexd, ldrex, ldrexh, ldrexb, strexd, strex, strexh,
  // strexb.
  //
  // The doubleword, halfword, and byte versions are available from ARMv6K
  // forward.
  //
  // The word versions are available from ARMv6 forward and can be used to
  // implement the halfword and byte versions on older systems.

  // LDREXD rt, rt2, [rn].  Constraint: rt even register, rt2=rt+1.
  BufferOffset as_ldrexd(Register rt, Register rt2, Register rn,
                         Condition c = Always);

  // LDREX rt, [rn]
  BufferOffset as_ldrex(Register rt, Register rn, Condition c = Always);
  BufferOffset as_ldrexh(Register rt, Register rn, Condition c = Always);
  BufferOffset as_ldrexb(Register rt, Register rn, Condition c = Always);

  // STREXD rd, rt, rt2, [rn].  Constraint: rt even register, rt2=rt+1.
  BufferOffset as_strexd(Register rd, Register rt, Register rt2, Register rn,
                         Condition c = Always);

  // STREX rd, rt, [rn].  Constraint: rd != rn, rd != rt.
  BufferOffset as_strex(Register rd, Register rt, Register rn,
                        Condition c = Always);
  BufferOffset as_strexh(Register rd, Register rt, Register rn,
                         Condition c = Always);
  BufferOffset as_strexb(Register rd, Register rt, Register rn,
                         Condition c = Always);

  // CLREX: clear any outstanding exclusive-access reservation.
  BufferOffset as_clrex();

  // Memory synchronization.
  // These are available from ARMv7 forward.
  BufferOffset as_dmb(BarrierOption option = BarrierSY);
  BufferOffset as_dsb(BarrierOption option = BarrierSY);
  BufferOffset as_isb();

  // Memory synchronization for architectures before ARMv7.
  BufferOffset as_dsb_trap();
  BufferOffset as_dmb_trap();
  BufferOffset as_isb_trap();

  // Speculation barrier.
  BufferOffset as_csdb();

  // Move Special Register and Hints:

  // yield hint instruction.
  BufferOffset as_yield();

  // Control flow stuff:

  // bx can *only* branch to a register, never to an immediate.
  BufferOffset as_bx(Register r, Condition c = Always);

  // Branch can branch to an immediate *or* to a register. Branches to
  // immediates are pc relative, branches to registers are absolute.
  BufferOffset as_b(BOffImm off, Condition c, Label* documentation = nullptr);

  BufferOffset as_b(Label* l, Condition c = Always);
  BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);

  // blx can go to either an immediate or a register. When blx'ing to a
  // register, we change processor mode depending on the low bit of the
  // register; when blx'ing to an immediate, we *always* change processor
  // state.
  BufferOffset as_blx(Label* l);

  BufferOffset as_blx(Register r, Condition c = Always);
  BufferOffset as_bl(BOffImm off, Condition c, Label* documentation = nullptr);
  // bl can only branch+link to an immediate; it never changes processor
  // state.
  BufferOffset as_bl();
  // bl #imm can have a condition code, blx #imm cannot.
  // blx reg can be conditional.
  BufferOffset as_bl(Label* l, Condition c);
  BufferOffset as_bl(BOffImm off, Condition c, BufferOffset inst);

  BufferOffset as_mrs(Register r, Condition c = Always);
  BufferOffset as_msr(Register r, Condition c = Always);
  // VFP instructions!
 private:
  enum vfp_size { IsDouble = 1 << 8, IsSingle = 0 << 8 };

  BufferOffset writeVFPInst(vfp_size sz, uint32_t blob);

  static void WriteVFPInstStatic(vfp_size sz, uint32_t blob, uint32_t* dest);

  // Unityped variants: all registers hold the same (ieee754 single/double)
  // type; notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
  BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                            VFPOp op, Condition c = Always);

 public:
  BufferOffset as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       Condition c = Always);
  BufferOffset as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       Condition c = Always);
  BufferOffset as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       Condition c = Always);
  BufferOffset as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                        Condition c = Always);
  BufferOffset as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                        Condition c = Always);
  BufferOffset as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                        Condition c = Always);
  BufferOffset as_vneg(VFPRegister vd, VFPRegister vm, Condition c = Always);
  BufferOffset as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c = Always);
  BufferOffset as_vabs(VFPRegister vd, VFPRegister vm, Condition c = Always);
  BufferOffset as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       Condition c = Always);
  BufferOffset as_vcmp(VFPRegister vd, VFPRegister vm, Condition c = Always);
  BufferOffset as_vcmpz(VFPRegister vd, Condition c = Always);

  // Specifically, a move between two same sized-registers.
  BufferOffset as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always);

  // Transfer between Core and VFP.
  enum FloatToCore_ { FloatToCore = 1 << 20, CoreToFloat = 0 << 20 };

 private:
  enum VFPXferSize { WordTransfer = 0x02000010, DoubleTransfer = 0x00400010 };

 public:
  // Unlike the next function, moving between the core registers and vfp
  // registers can't be *that* properly typed. Namely, since I don't want to
  // munge the type VFPRegister to also include core registers. Thus, the core
  // and vfp registers are passed in based on their type, and src/dest is
  // determined by the float2core.

  BufferOffset as_vxfer(Register vt1, Register vt2, VFPRegister vm,
                        FloatToCore_ f2c, Condition c = Always, int idx = 0);

  // Our encoding actually allows just the src and the dest (and their types)
  // to uniquely specify the encoding that we are going to use.
  BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false,
                       Condition c = Always);

  // Hard coded to a 32 bit fixed width result for now.
  BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint,
                            bool toFixed, Condition c = Always);

  // Convert between single- and half-precision. Both registers are single
  // precision.
  BufferOffset as_vcvtb_s2h(VFPRegister vd, VFPRegister vm,
                            Condition c = Always);
  BufferOffset as_vcvtb_h2s(VFPRegister vd, VFPRegister vm,
                            Condition c = Always);

  // Transfer between VFP and memory.
  BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
                       Condition c = Always /* vfp doesn't have a wb option*/);

  static void as_vdtr_patch(LoadStore ls, VFPRegister vd, VFPAddr addr,
                            Condition c /* vfp doesn't have a wb option */,
                            uint32_t* dest);

  // VFP's ldm/stm work differently from the standard arm ones. You can only
  // transfer a range.

  BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
                       /* also has update conditions */ Condition c = Always);

  // vldr/vstr variants that handle unaligned accesses.  These encode as NEON
  // single-element instructions and can only be used if NEON is available.
  // Here, vd must be tagged as a float or double register.
  BufferOffset as_vldr_unaligned(VFPRegister vd, Register rn);
  BufferOffset as_vstr_unaligned(VFPRegister vd, Register rn);

  BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);

  BufferOffset as_vmrs(Register r, Condition c = Always);
  BufferOffset as_vmsr(Register r, Condition c = Always);
   1678 
  // Label operations.
  bool nextLink(BufferOffset b, BufferOffset* next);
  void bind(Label* label, BufferOffset boff = BufferOffset());
  void bind(CodeLabel* label) { label->target()->bind(currentOffset()); }
  uint32_t currentOffset() { return nextOffset().getOffset(); }
  void retarget(Label* label, Label* target);
  // I'm going to pretend this doesn't exist for now.
  void retarget(Label* label, void* target, RelocationKind reloc);

  static void Bind(uint8_t* rawCode, const CodeLabel& label);
  static void PatchMovwt(Instruction* addr, uint32_t imm);

  void as_bkpt();
  BufferOffset as_illegal_trap();

 public:
  static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                   CompactBufferReader& reader);
  static void TraceDataRelocations(JSTracer* trc, JitCode* code,
                                   CompactBufferReader& reader);

  // Debug-only check that this assembler holds no GC pointers: no data
  // relocations, and every pending jump is to a hardcoded address.
  void assertNoGCThings() const {
#ifdef DEBUG
    MOZ_ASSERT(dataRelocations_.length() == 0);
    for (auto& j : jumps_) {
      MOZ_ASSERT(j.kind() == RelocationKind::HARDCODED);
    }
#endif
  }

  static bool SupportsFloatingPoint() { return ARMFlags::HasVFP(); }
  static bool SupportsUnalignedAccesses() { return ARMFlags::HasARMv7(); }
  // Note, returning false here is technically wrong, but one has to go via the
  // as_vldr_unaligned and as_vstr_unaligned instructions to get proper behavior
  // and those are NEON-specific and have to be asked for specifically.
  static bool SupportsFastUnalignedFPAccesses() { return false; }
  static bool SupportsFloat64To16() { return false; }
  static bool SupportsFloat32To16() { return ARMFlags::HasFPHalfPrecision(); }

  static bool HasRoundInstruction(RoundingMode mode) { return false; }
   1719 
   1720 protected:
   1721  void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
   1722    enoughMemory_ &= jumps_.append(RelativePatch(target.value, kind));
   1723    if (kind == RelocationKind::JITCODE) {
   1724      jumpRelocations_.writeUnsigned(src.getOffset());
   1725    }
   1726  }
   1727 
   1728 public:
   1729  // The buffer is about to be linked, make sure any constant pools or excess
   1730  // bookkeeping has been flushed to the instruction stream.
   1731  void flush() {
   1732    MOZ_ASSERT(!isFinished);
   1733    m_buffer.flushPool();
   1734    return;
   1735  }
   1736 
  // Emit a free-form comment into the disassembly spew (no-op unless built
  // with JS_DISASM_ARM).
  void comment(const char* msg) {
#ifdef JS_DISASM_ARM
    spew_.spew("; %s", msg);
#endif
  }

  // Copy the assembly code to the given buffer, and perform any pending
  // relocations relying on the target address.
  void executableCopy(uint8_t* buffer);
   1746 
   1747  // Actual assembly emitting functions.
   1748 
   1749  // Since I can't think of a reasonable default for the mode, I'm going to
   1750  // leave it as a required argument.
   1751  void startDataTransferM(LoadStore ls, Register rm, DTMMode mode,
   1752                          DTMWriteBack update = NoWriteBack,
   1753                          Condition c = Always) {
   1754    MOZ_ASSERT(!dtmActive);
   1755    dtmUpdate = update;
   1756    dtmBase = rm;
   1757    dtmLoadStore = ls;
   1758    dtmLastReg = -1;
   1759    dtmRegBitField = 0;
   1760    dtmActive = 1;
   1761    dtmCond = c;
   1762    dtmMode = mode;
   1763  }
   1764 
   1765  void transferReg(Register rn) {
   1766    MOZ_ASSERT(dtmActive);
   1767    MOZ_ASSERT(rn.code() > dtmLastReg);
   1768    dtmRegBitField |= 1 << rn.code();
   1769    if (dtmLoadStore == IsLoad && rn.code() == 13 && dtmBase.code() == 13) {
   1770      MOZ_CRASH("ARM Spec says this is invalid");
   1771    }
   1772  }
   1773  void finishDataTransfer() {
   1774    dtmActive = false;
   1775    as_dtm(dtmLoadStore, dtmBase, dtmRegBitField, dtmMode, dtmUpdate, dtmCond);
   1776  }
   1777 
   1778  void startFloatTransferM(LoadStore ls, Register rm, DTMMode mode,
   1779                           DTMWriteBack update = NoWriteBack,
   1780                           Condition c = Always) {
   1781    MOZ_ASSERT(!dtmActive);
   1782    dtmActive = true;
   1783    dtmUpdate = update;
   1784    dtmLoadStore = ls;
   1785    dtmBase = rm;
   1786    dtmCond = c;
   1787    dtmLastReg = -1;
   1788    dtmMode = mode;
   1789    dtmDelta = 0;
   1790  }
  // Add one VFP register to the pending vldm/vstm. Registers must form a
  // contiguous run: each call must supply the register adjacent (+1 or -1)
  // to the previous one.
  void transferFloatReg(VFPRegister rn) {
    if (dtmLastReg == -1) {
      vdtmFirstReg = rn.code();
    } else {
      if (dtmDelta == 0) {
        // The second register fixes the direction of the run.
        dtmDelta = rn.code() - dtmLastReg;
        MOZ_ASSERT(dtmDelta == 1 || dtmDelta == -1);
      }
      MOZ_ASSERT(dtmLastReg >= 0);
      MOZ_ASSERT(rn.code() == unsigned(dtmLastReg) + dtmDelta);
    }

    dtmLastReg = rn.code();
  }
  // Emit the accumulated vldm/vstm, splitting into multiple instructions if
  // the run exceeds the 16-register limit of a single vdtm.
  void finishFloatTransfer() {
    MOZ_ASSERT(dtmActive);
    dtmActive = false;
    MOZ_ASSERT(dtmLastReg != -1);
    // A single-register transfer never established a direction; treat it as
    // ascending.
    dtmDelta = dtmDelta ? dtmDelta : 1;
    // The operand for the vstr/vldr instruction is the lowest register in the
    // range.
    int low = std::min(dtmLastReg, vdtmFirstReg);
    int high = std::max(dtmLastReg, vdtmFirstReg);
    // Fencepost problem.
    int len = high - low + 1;
    // vdtm can only transfer 16 registers at once.  If we need to transfer
    // more, then either hoops are necessary, or we need to be updating the
    // register.
    MOZ_ASSERT_IF(len > 16, dtmUpdate == WriteBack);

    int adjustLow = dtmLoadStore == IsStore ? 0 : 1;
    int adjustHigh = dtmLoadStore == IsStore ? -1 : 0;
    while (len > 0) {
      // Limit the instruction to 16 registers.
      int curLen = std::min(len, 16);
      // If it is a store, we want to start at the high end and move down
      // (e.g. vpush d16-d31; vpush d0-d15).
      int curStart = (dtmLoadStore == IsStore) ? high - curLen + 1 : low;
      as_vdtm(dtmLoadStore, dtmBase,
              VFPRegister(FloatRegister::FromCode(curStart)), curLen, dtmCond);
      // Update the bounds.
      low += adjustLow * curLen;
      high += adjustHigh * curLen;
      // Update the length parameter.
      len -= curLen;
    }
  }
   1838 
 private:
  // State shared by the startDataTransferM/transferReg/finishDataTransfer
  // and startFloatTransferM/transferFloatReg/finishFloatTransfer sequences.
  int dtmRegBitField;  // Bitmask of core registers accumulated so far.
  int vdtmFirstReg;    // First VFP register code of the float run.
  int dtmLastReg;      // Most recently added register code, or -1 if none.
  int dtmDelta;        // Direction (+1/-1) of the VFP run; 0 until known.
  Register dtmBase;    // Base address register of the transfer.
  DTMWriteBack dtmUpdate;
  DTMMode dtmMode;
  LoadStore dtmLoadStore;
  bool dtmActive;      // True between start*TransferM and finish*Transfer.
  Condition dtmCond;

 public:
  // Instruction words used to pad the stream to the requested alignment.
  enum {
    PadForAlign8 = (int)0x00,
    PadForAlign16 = (int)0x0000,
    PadForAlign32 = (int)0xe12fff7f  // 'bkpt 0xffff'
  };
   1857 
  // API for speaking with the IonAssemblerBufferWithConstantPools: generate an
  // initial placeholder instruction that we want to later fix up.
  static void InsertIndexIntoTag(uint8_t* load, uint32_t index);

  // Take the stub value that was written in before, and write in an actual
  // load using the index we'd computed previously as well as the address of
  // the pool start.
  static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);

  // We're not tracking short-range branches for ARM for now.
  static void PatchShortRangeBranchToVeneer(ARMBuffer*, unsigned rangeIdx,
                                            BufferOffset deadline,
                                            BufferOffset veneer) {
    MOZ_CRASH();
  }
  // END API

  // Move our entire pool into the instruction stream. This is to force an
  // opportunistic dump of the pool, preferably when it is more convenient to
  // do a dump.
  void flushBuffer();
  void enterNoPool(size_t maxInst);
  void leaveNoPool();
  void enterNoNops();
  void leaveNoNops();

  static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
  static void WritePoolGuard(BufferOffset branch, Instruction* inst,
                             BufferOffset dest);

  static uint32_t PatchWrite_NearCallSize();
  static uint32_t NopSize() { return 4; }
  static void PatchWrite_NearCall(CodeLocationLabel start,
                                  CodeLocationLabel toCall);
  static void PatchDataWithValueCheck(CodeLocationLabel label,
                                      PatchedImmPtr newValue,
                                      PatchedImmPtr expectedValue);
  static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                      ImmPtr expectedValue);
  static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);

  // Round an offset up to the next even slot (doubles are 8-byte aligned in
  // the ABI).
  static uint32_t AlignDoubleArg(uint32_t offset) { return (offset + 1) & ~1; }
  static uint8_t* NextInstruction(uint8_t* instruction,
                                  uint32_t* count = nullptr);

  // Toggle a jmp or cmp emitted by toggledJump().
  static void ToggleToJmp(CodeLocationLabel inst_);
  static void ToggleToCmp(CodeLocationLabel inst_);

  static size_t ToggledCallSize(uint8_t* code);
  static void ToggleCall(CodeLocationLabel inst_, bool enabled);

  void processCodeLabels(uint8_t* rawCode);

  void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                   const Disassembler::HeapAccess& heapAccess) {
    // Implement this if we implement a disassembler.
  }
   1916 };  // Assembler
   1917 
// An Instruction is a structure for both encoding and decoding any and all ARM
// instructions. Many classes have not been implemented thus far.
class Instruction {
  uint32_t data;

 protected:
  // This is not for defaulting to always; this is for instructions that
  // cannot be made conditional, and have the usually invalid 4b1111 cond
  // field.
  explicit Instruction(uint32_t data_, bool fake = false)
      : data(data_ | 0xf0000000) {
    MOZ_ASSERT(fake || ((data_ & 0xf0000000) == 0));
  }
  // Standard constructor.
  Instruction(uint32_t data_, Assembler::Condition c)
      : data(data_ | (uint32_t)c) {
    MOZ_ASSERT((data_ & 0xf0000000) == 0);
  }
  // You should never create an instruction directly. You should create a more
  // specific instruction which will eventually call one of these constructors
  // for you.
 public:
  uint32_t encode() const { return data; }
  // Check if this instruction is really a particular case.
  template <class C>
  bool is() const {
    return C::IsTHIS(*this);
  }

  // Safely get a more specific variant of this pointer.
  template <class C>
  C* as() const {
    return C::AsTHIS(*this);
  }

  const Instruction& operator=(Instruction src) {
    data = src.data;
    return *this;
  }
  // Since almost all instructions have condition codes, the condition code
  // extractor resides in the base class.
  Assembler::Condition extractCond() const {
    MOZ_ASSERT(data >> 28 != 0xf,
               "The instruction does not have condition code");
    return (Assembler::Condition)(data & 0xf0000000);
  }

  // Sometimes, an api wants a uint32_t (or a pointer to it) rather than an
  // instruction. raw() just coerces this into a pointer to a uint32_t.
  const uint32_t* raw() const { return &data; }
  uint32_t size() const { return 4; }
};  // Instruction
   1970 
   1971 // Make sure that it is the right size.
   1972 static_assert(sizeof(Instruction) == 4);
   1973 
   1974 inline void InstructionIterator::advanceRaw(ptrdiff_t instructions) {
   1975  inst_ = inst_ + instructions;
   1976 }
   1977 
// Data Transfer Instructions.
class InstDTR : public Instruction {
 public:
  enum IsByte_ { IsByte = 0x00400000, IsWord = 0x00000000 };
  static const int IsDTR = 0x04000000;
  static const int IsDTRMask = 0x0c000000;

  // TODO: Replace the initialization with something that is safer.
  InstDTR(LoadStore ls, IsByte_ ib, Index mode, Register rt, DTRAddr addr,
          Assembler::Condition c)
      : Instruction(std::underlying_type_t<LoadStore>(ls) |
                        std::underlying_type_t<IsByte_>(ib) |
                        std::underlying_type_t<Index>(mode) | RT(rt) |
                        addr.encode() | IsDTR,
                    c) {}

  static bool IsTHIS(const Instruction& i);
  static InstDTR* AsTHIS(const Instruction& i);
};
   1997 static_assert(sizeof(InstDTR) == sizeof(Instruction));
   1998 
class InstLDR : public InstDTR {
 public:
  InstLDR(Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
      : InstDTR(IsLoad, IsWord, mode, rt, addr, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstLDR* AsTHIS(const Instruction& i);

  // The 12-bit immediate offset, negated when the U (add/subtract) bit is
  // clear.
  int32_t signedOffset() const {
    int32_t offset = encode() & 0xfff;
    if (IsUp_(encode() & IsUp) != IsUp) {
      return -offset;
    }
    return offset;
  }
  uint32_t* dest() const {
    int32_t offset = signedOffset();
    // When patching the load in PatchConstantPoolLoad, we ensure that the
    // offset is a multiple of 4, offset by 8 bytes from the actual
    // location.  Indeed, when the base register is PC, ARM's 3 stages
    // pipeline design makes it that PC is off by 8 bytes (= 2 *
    // sizeof(uint32*)) when we actually executed it.
    MOZ_ASSERT(offset % 4 == 0);
    offset >>= 2;
    return (uint32_t*)raw() + offset + 2;
  }
};
   2026 static_assert(sizeof(InstDTR) == sizeof(InstLDR));
   2027 
class InstNOP : public Instruction {
 public:
  // Canonical ARM nop encoding (mov r0, r0 form used by this assembler).
  static const uint32_t NopInst = 0x0320f000;

  InstNOP() : Instruction(NopInst, Assembler::Always) {}

  static bool IsTHIS(const Instruction& i);
  static InstNOP* AsTHIS(Instruction& i);
};
   2037 
// Branching to a register, or calling a register
class InstBranchReg : public Instruction {
 protected:
  // Don't use BranchTag yourself, use a derived instruction.
  enum BranchTag { IsBX = 0x012fff10, IsBLX = 0x012fff30 };

  static const uint32_t IsBRegMask = 0x0ffffff0;

  InstBranchReg(BranchTag tag, Register rm, Assembler::Condition c)
      : Instruction(tag | rm.code(), c) {}

 public:
  static bool IsTHIS(const Instruction& i);
  static InstBranchReg* AsTHIS(const Instruction& i);

  // Get the register that is being branched to
  void extractDest(Register* dest);
  // Make sure we are branching to a pre-known register
  bool checkDest(Register dest);
};
   2058 static_assert(sizeof(InstBranchReg) == sizeof(Instruction));
   2059 
// Branching to an immediate offset, or calling an immediate offset
class InstBranchImm : public Instruction {
 protected:
  enum BranchTag { IsB = 0x0a000000, IsBL = 0x0b000000 };

  static const uint32_t IsBImmMask = 0x0f000000;

  InstBranchImm(BranchTag tag, BOffImm off, Assembler::Condition c)
      : Instruction(tag | off.encode(), c) {}

 public:
  static bool IsTHIS(const Instruction& i);
  static InstBranchImm* AsTHIS(const Instruction& i);

  // Extract the encoded branch offset.
  void extractImm(BOffImm* dest);
};
   2076 static_assert(sizeof(InstBranchImm) == sizeof(Instruction));
   2077 
// Very specific branching instructions.
class InstBXReg : public InstBranchReg {
 public:
  static bool IsTHIS(const Instruction& i);
  static InstBXReg* AsTHIS(const Instruction& i);
};
   2084 
class InstBLXReg : public InstBranchReg {
 public:
  InstBLXReg(Register reg, Assembler::Condition c)
      : InstBranchReg(IsBLX, reg, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstBLXReg* AsTHIS(const Instruction& i);
};
   2093 
class InstBImm : public InstBranchImm {
 public:
  InstBImm(BOffImm off, Assembler::Condition c) : InstBranchImm(IsB, off, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstBImm* AsTHIS(const Instruction& i);
};
   2101 
class InstBLImm : public InstBranchImm {
 public:
  InstBLImm(BOffImm off, Assembler::Condition c)
      : InstBranchImm(IsBL, off, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstBLImm* AsTHIS(const Instruction& i);
};
   2110 
// Both movw and movt. The layout of both the immediate and the destination
// register is the same so the code is being shared.
class InstMovWT : public Instruction {
 protected:
  enum WT { IsW = 0x03000000, IsT = 0x03400000 };
  static const uint32_t IsWTMask = 0x0ff00000;

  InstMovWT(Register rd, Imm16 imm, WT wt, Assembler::Condition c)
      : Instruction(RD(rd) | imm.encode() | wt, c) {}

 public:
  void extractImm(Imm16* dest);
  void extractDest(Register* dest);
  bool checkImm(Imm16 dest);
  bool checkDest(Register dest);

  static bool IsTHIS(Instruction& i);
  static InstMovWT* AsTHIS(Instruction& i);
};
   2130 static_assert(sizeof(InstMovWT) == sizeof(Instruction));
   2131 
class InstMovW : public InstMovWT {
 public:
  InstMovW(Register rd, Imm16 imm, Assembler::Condition c)
      : InstMovWT(rd, imm, IsW, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstMovW* AsTHIS(const Instruction& i);
};
   2140 
class InstMovT : public InstMovWT {
 public:
  InstMovT(Register rd, Imm16 imm, Assembler::Condition c)
      : InstMovWT(rd, imm, IsT, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstMovT* AsTHIS(const Instruction& i);
};
   2149 
class InstALU : public Instruction {
  static const int32_t ALUMask = 0xc << 24;

 public:
  InstALU(Register rd, Register rn, Operand2 op2, ALUOp op, SBit s,
          Assembler::Condition c)
      : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | s, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstALU* AsTHIS(const Instruction& i);

  // Accessors for the various encoded fields of a data-processing
  // instruction.
  void extractOp(ALUOp* ret);
  bool checkOp(ALUOp op);
  void extractDest(Register* ret);
  bool checkDest(Register rd);
  void extractOp1(Register* ret);
  bool checkOp1(Register rn);
  Operand2 extractOp2();
};
   2169 
class InstCMP : public InstALU {
 public:
  static bool IsTHIS(const Instruction& i);
  static InstCMP* AsTHIS(const Instruction& i);
};
   2175 
class InstMOV : public InstALU {
 public:
  static bool IsTHIS(const Instruction& i);
  static InstMOV* AsTHIS(const Instruction& i);
};
   2181 
// An iterator over instructions in the buffer, with a safe interface that
// references not-necessarily-linear Instructions by linear BufferOffset.
// (Despite the old comment, this is a runtime iterator, not compile-time.)
class BufferInstructionIterator
    : public ARMBuffer::AssemblerBufferInstIterator {
 public:
  BufferInstructionIterator(BufferOffset bo, ARMBuffer* buffer)
      : ARMBuffer::AssemblerBufferInstIterator(bo, buffer) {}

  // Advances the buffer to the next intentionally-inserted instruction.
  Instruction* next() {
    advance(cur()->size());
    maybeSkipAutomaticInstructions();
    return cur();
  }

  // Advances the BufferOffset past any automatically-inserted instructions.
  Instruction* maybeSkipAutomaticInstructions();
};
   2200 
// r0-r3 carry the first four integer arguments (AAPCS).
static const uint32_t NumIntArgRegs = 4;

// There are 16 *float* registers available for arguments
// If doubles are used, only half the number of registers are available.
static const uint32_t NumFloatArgRegs = 16;
   2206 
   2207 static inline bool GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs,
   2208                                Register* out) {
   2209  if (usedIntArgs >= NumIntArgRegs) {
   2210    return false;
   2211  }
   2212 
   2213  *out = Register::FromCode(usedIntArgs);
   2214  return true;
   2215 }
   2216 
   2217 // Get a register in which we plan to put a quantity that will be used as an
   2218 // integer argument. This differs from GetIntArgReg in that if we have no more
   2219 // actual argument registers to use we will fall back on using whatever
   2220 // CallTempReg* don't overlap the argument registers, and only fail once those
   2221 // run out too.
   2222 static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
   2223                                       uint32_t usedFloatArgs, Register* out) {
   2224  if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) {
   2225    return true;
   2226  }
   2227 
   2228  // Unfortunately, we have to assume things about the point at which
   2229  // GetIntArgReg returns false, because we need to know how many registers it
   2230  // can allocate.
   2231  usedIntArgs -= NumIntArgRegs;
   2232  if (usedIntArgs >= NumCallTempNonArgRegs) {
   2233    return false;
   2234  }
   2235 
   2236  *out = CallTempNonArgRegs[usedIntArgs];
   2237  return true;
   2238 }
   2239 
   2240 class DoubleEncoder {
   2241  struct DoubleEntry {
   2242    uint32_t dblTop;
   2243    datastore::Imm8VFPImmData data;
   2244  };
   2245 
   2246  static const DoubleEntry table[256];
   2247 
   2248 public:
   2249  bool lookup(uint32_t top, datastore::Imm8VFPImmData* ret) const {
   2250    for (int i = 0; i < 256; i++) {
   2251      if (table[i].dblTop == top) {
   2252        *ret = table[i].data;
   2253        return true;
   2254      }
   2255    }
   2256    return false;
   2257  }
   2258 };
   2259 
// Forbids nop filling for testing purposes. Not nestable.
// RAII: enters the no-nops region on construction, leaves it on destruction.
class AutoForbidNops {
 protected:
  Assembler* masm_;

 public:
  explicit AutoForbidNops(Assembler* masm) : masm_(masm) {
    masm_->enterNoNops();
  }
  ~AutoForbidNops() { masm_->leaveNoNops(); }
};
   2271 
class AutoForbidPoolsAndNops : public AutoForbidNops {
 public:
  // The maxInst argument is the maximum number of word sized instructions
  // that will be allocated within this context. It is used to determine if
  // the pool needs to be dumped before entering this context. The debug code
  // checks that no more than maxInst instructions are actually allocated.
  //
  // Allocation of pool entries is not supported within this context so the
  // code can not use large integers or float constants etc.
  AutoForbidPoolsAndNops(Assembler* masm, size_t maxInst)
      : AutoForbidNops(masm) {
    masm_->enterNoPool(maxInst);
  }

  ~AutoForbidPoolsAndNops() { masm_->leaveNoPool(); }
};
   2288 
   2289 }  // namespace jit
   2290 }  // namespace js
   2291 
   2292 #endif /* jit_arm_Assembler_arm_h */