tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Assembler-arm.cpp (93461B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/arm/Assembler-arm.h"
      8 
      9 #include "mozilla/DebugOnly.h"
     10 #include "mozilla/MathAlgorithms.h"
     11 #include "mozilla/Maybe.h"
     12 #include "mozilla/Sprintf.h"
     13 
     14 #include <type_traits>
     15 
     16 #include "gc/Marking.h"
     17 #include "jit/arm/disasm/Disasm-arm.h"
     18 #include "jit/arm/MacroAssembler-arm.h"
     19 #include "jit/AutoWritableJitCode.h"
     20 #include "jit/ExecutableAllocator.h"
     21 #include "jit/MacroAssembler.h"
     22 #include "vm/Realm.h"
     23 #include "wasm/WasmFrame.h"
     24 
     25 using namespace js;
     26 using namespace js::jit;
     27 
     28 using mozilla::CountLeadingZeroes32;
     29 using mozilla::DebugOnly;
     30 
     31 using LabelDoc = DisassemblerSpew::LabelDoc;
     32 using LiteralDoc = DisassemblerSpew::LiteralDoc;
     33 
// Deliberately empty function: a stable symbol to set debugger breakpoints on.
void dbg_break() {}
     35 
// The ABIArgGenerator is used for making system ABI calls and for inter-wasm
// calls. The system ABI can either be SoftFp or HardFp, and inter-wasm calls
// are always HardFp calls. The initialization defaults to HardFp, and the ABI
// choice is made before any system ABI calls with the method "setUseHardFp".
ABIArgGenerator::ABIArgGenerator(ABIKind kind)
    : ABIArgGeneratorShared(kind),
      intRegIndex_(0),    // Next core (integer) argument register to hand out.
      floatRegIndex_(0),  // Next single-precision VFP slot (HardFp only).
      current_(),
      useHardFp_(true) {}  // Defaults to HardFp; see comment above.
     46 
// See the "Parameter Passing" section of the "Procedure Call Standard for the
// ARM Architecture" documentation.
//
// Soft-float variant: floating-point arguments travel in core registers or on
// the stack, never in VFP registers. Advances the generator's register/stack
// cursors and returns the location assigned to an argument of |type|.
ABIArg ABIArgGenerator::softNext(MIRType type) {
  switch (type) {
    case MIRType::Int32:
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData:
    case MIRType::StackResults:
      // Word-sized arguments: core registers first, then 4-byte stack slots.
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Int64:
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(uint64_t) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      // Pass in an even/odd core register pair.
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    case MIRType::Float32:
      // Soft-float: a float is passed exactly like an Int32.
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Double:
      // Soft-float: a double is passed exactly like an Int64.
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(double) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(double);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }

  return current_;
}
    111 
// Hard-float variant of the AAPCS argument allocator: integer-like arguments
// use core registers, while Float32/Double use VFP registers. Note that the
// integer and float cursors advance independently.
ABIArg ABIArgGenerator::hardNext(MIRType type) {
  switch (type) {
    case MIRType::Int32:
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData:
    case MIRType::StackResults:
      // Word-sized arguments: core registers first, then 4-byte stack slots.
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Int64:
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(uint64_t) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      // Pass in an even/odd core register pair.
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    case MIRType::Float32:
      // A float occupies one single-precision VFP register.
      if (floatRegIndex_ == NumFloatArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(VFPRegister(floatRegIndex_, VFPRegister::Single));
      floatRegIndex_++;
      break;
    case MIRType::Double:
      // Double register are composed of 2 float registers, thus we have to
      // skip any float register which cannot be used in a pair of float
      // registers in which a double value can be stored.
      floatRegIndex_ = (floatRegIndex_ + 1) & ~1;
      if (floatRegIndex_ == NumFloatArgRegs) {
        static const uint32_t align = sizeof(double) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        // sizeof(uint64_t) == sizeof(double) == 8; same stack consumption as
        // the Int64 case above.
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      // floatRegIndex_ counts single-precision slots, so the double register
      // index is half of it.
      current_ = ABIArg(VFPRegister(floatRegIndex_ >> 1, VFPRegister::Double));
      floatRegIndex_ += 2;
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }

  return current_;
}
    173 
    174 ABIArg ABIArgGenerator::next(MIRType type) {
    175  if (useHardFp_) {
    176    return hardNext(type);
    177  }
    178  return softNext(type);
    179 }
    180 
    181 bool js::jit::IsUnaligned(const wasm::MemoryAccessDesc& access) {
    182  if (!access.align()) {
    183    return false;
    184  }
    185 
    186  if (access.type() == Scalar::Float64 && access.align() >= 4) {
    187    return false;
    188  }
    189 
    190  return access.align() < access.byteSize();
    191 }
    192 
// Encode a standard register when it is being used as src1, the dest, and an
// extra register. These should never be called with an InvalidReg.
uint32_t js::jit::RT(Register r) {
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 12;  // Placed in bits 15..12 of the instruction.
}

uint32_t js::jit::RN(Register r) {
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 16;  // Placed in bits 19..16.
}

uint32_t js::jit::RD(Register r) {
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 12;  // Placed in bits 15..12.
}

uint32_t js::jit::RM(Register r) {
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 8;  // Placed in bits 11..8.
}

// Encode a standard register when it is being used as src1, the dest, and an
// extra register. For these, an InvalidReg is used to indicate a optional
// register that has been omitted.
uint32_t js::jit::maybeRT(Register r) {
  // An omitted register encodes as zero bits.
  if (r == InvalidReg) {
    return 0;
  }

  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 12;
}

uint32_t js::jit::maybeRN(Register r) {
  if (r == InvalidReg) {
    return 0;
  }

  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 16;
}

uint32_t js::jit::maybeRD(Register r) {
  if (r == InvalidReg) {
    return 0;
  }

  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 12;
}
    244 
// Decoders: extract a register from an encoded instruction word. These are
// the inverses of the encoders above (bits 15..12, 3..0, 11..8, 19..16).
Register js::jit::toRD(Instruction i) {
  return Register::FromCode((i.encode() >> 12) & 0xf);
}
Register js::jit::toR(Instruction i) {
  return Register::FromCode(i.encode() & 0xf);
}

Register js::jit::toRM(Instruction i) {
  return Register::FromCode((i.encode() >> 8) & 0xf);
}

Register js::jit::toRN(Instruction i) {
  return Register::FromCode((i.encode() >> 16) & 0xf);
}

// Encode a VFP register into the Vd / Vn / Vm instruction fields. A missing
// (omitted) register encodes as zero bits.
uint32_t js::jit::VD(VFPRegister vr) {
  if (vr.isMissing()) {
    return 0;
  }

  // Bits 15,14,13,12, 22.
  VFPRegister::VFPRegIndexSplit s = vr.encode();
  return s.bit << 22 | s.block << 12;
}
uint32_t js::jit::VN(VFPRegister vr) {
  if (vr.isMissing()) {
    return 0;
  }

  // Bits 19,18,17,16, 7.
  VFPRegister::VFPRegIndexSplit s = vr.encode();
  return s.bit << 7 | s.block << 16;
}
uint32_t js::jit::VM(VFPRegister vr) {
  if (vr.isMissing()) {
    return 0;
  }

  // Bits 5, 3,2,1,0.
  VFPRegister::VFPRegIndexSplit s = vr.encode();
  return s.bit << 5 | s.block;
}
    287 
// Split a VFP register code into the 4-bit "block" and single extra "bit"
// used by the VD/VN/VM encoders above. For doubles the extra bit is the high
// bit of the register number; for singles it is the low bit.
VFPRegister::VFPRegIndexSplit jit::VFPRegister::encode() {
  MOZ_ASSERT(!_isInvalid);

  switch (kind) {
    case Double:
      return VFPRegIndexSplit(code_ & 0xf, code_ >> 4);
    case Single:
      return VFPRegIndexSplit(code_ >> 1, code_ & 1);
    default:
      // VFP register treated as an integer, NOT a gpr.
      // Same split as Single.
      return VFPRegIndexSplit(code_ >> 1, code_ & 1);
  }
}
    301 
// Classification/casting helpers for the single data-transfer (DTR)
// instruction family. AsTHIS returns nullptr when the instruction does not
// match.
bool InstDTR::IsTHIS(const Instruction& i) {
  return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
}

InstDTR* InstDTR::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstDTR*)&i;
  }
  return nullptr;
}

// NOTE(review): this test is identical to InstDTR::IsTHIS — it matches any
// DTR, not only loads. Presumably the load bit is not part of IsDTRMask;
// confirm against the mask definition in Assembler-arm.h.
bool InstLDR::IsTHIS(const Instruction& i) {
  return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
}

InstLDR* InstLDR::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstLDR*)&i;
  }
  return nullptr;
}

InstNOP* InstNOP::AsTHIS(Instruction& i) {
  if (IsTHIS(i)) {
    return (InstNOP*)&i;
  }
  return nullptr;
}

// Compare everything but the condition field (top four bits) against the NOP
// encoding.
bool InstNOP::IsTHIS(const Instruction& i) {
  return (i.encode() & 0x0fffffff) == NopInst;
}
    334 
// Register-indirect branches: bx or blx with a register operand.
bool InstBranchReg::IsTHIS(const Instruction& i) {
  return InstBXReg::IsTHIS(i) || InstBLXReg::IsTHIS(i);
}

InstBranchReg* InstBranchReg::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstBranchReg*)&i;
  }
  return nullptr;
}
// Read the branch-target register out of the encoded instruction.
void InstBranchReg::extractDest(Register* dest) { *dest = toR(*this); }
bool InstBranchReg::checkDest(Register dest) { return dest == toR(*this); }

// Immediate branches: b or bl with a pc-relative offset.
bool InstBranchImm::IsTHIS(const Instruction& i) {
  return InstBImm::IsTHIS(i) || InstBLImm::IsTHIS(i);
}

InstBranchImm* InstBranchImm::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstBranchImm*)&i;
  }
  return nullptr;
}

// Read the branch offset out of the encoded instruction.
void InstBranchImm::extractImm(BOffImm* dest) { *dest = BOffImm(*this); }

bool InstBXReg::IsTHIS(const Instruction& i) {
  return (i.encode() & IsBRegMask) == IsBX;
}

InstBXReg* InstBXReg::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstBXReg*)&i;
  }
  return nullptr;
}

bool InstBLXReg::IsTHIS(const Instruction& i) {
  return (i.encode() & IsBRegMask) == IsBLX;
}
InstBLXReg* InstBLXReg::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstBLXReg*)&i;
  }
  return nullptr;
}

bool InstBImm::IsTHIS(const Instruction& i) {
  return (i.encode() & IsBImmMask) == IsB;
}
InstBImm* InstBImm::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstBImm*)&i;
  }
  return nullptr;
}

bool InstBLImm::IsTHIS(const Instruction& i) {
  return (i.encode() & IsBImmMask) == IsBL;
}
InstBLImm* InstBLImm::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstBLImm*)&i;
  }
  return nullptr;
}
    401 
// movw/movt pair support: these two instructions together materialize a full
// 32-bit immediate (movw loads the low half, movt the high half).
bool InstMovWT::IsTHIS(Instruction& i) {
  return InstMovW::IsTHIS(i) || InstMovT::IsTHIS(i);
}
InstMovWT* InstMovWT::AsTHIS(Instruction& i) {
  if (IsTHIS(i)) {
    return (InstMovWT*)&i;
  }
  return nullptr;
}

// Extract / compare the 16-bit immediate payload of a movw/movt.
void InstMovWT::extractImm(Imm16* imm) { *imm = Imm16(*this); }
bool InstMovWT::checkImm(Imm16 imm) {
  return imm.decode() == Imm16(*this).decode();
}

// Extract / compare the destination register of a movw/movt.
void InstMovWT::extractDest(Register* dest) { *dest = toRD(*this); }
bool InstMovWT::checkDest(Register dest) { return dest == toRD(*this); }

bool InstMovW::IsTHIS(const Instruction& i) {
  return (i.encode() & IsWTMask) == IsW;
}

InstMovW* InstMovW::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstMovW*)&i;
  }
  return nullptr;
}
InstMovT* InstMovT::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstMovT*)&i;
  }
  return nullptr;
}

bool InstMovT::IsTHIS(const Instruction& i) {
  return (i.encode() & IsWTMask) == IsT;
}
    440 
// Data-processing (ALU) instructions and two special cases recognized by
// their fixed fields: cmp (ALU op OpCmp, Rd == 0) and mov (ALU op OpMov,
// Rn == 0).
InstALU* InstALU::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstALU*)&i;
  }
  return nullptr;
}
bool InstALU::IsTHIS(const Instruction& i) {
  return (i.encode() & ALUMask) == 0;
}
// The ALU opcode lives in bits 24..21.
void InstALU::extractOp(ALUOp* ret) { *ret = ALUOp(encode() & (0xf << 21)); }
bool InstALU::checkOp(ALUOp op) {
  ALUOp mine;
  extractOp(&mine);
  return mine == op;
}
void InstALU::extractDest(Register* ret) { *ret = toRD(*this); }
bool InstALU::checkDest(Register rd) { return rd == toRD(*this); }
void InstALU::extractOp1(Register* ret) { *ret = toRN(*this); }
bool InstALU::checkOp1(Register rn) { return rn == toRN(*this); }
Operand2 InstALU::extractOp2() { return Operand2(encode()); }

InstCMP* InstCMP::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstCMP*)&i;
  }
  return nullptr;
}

// cmp encodes with a zero destination register.
bool InstCMP::IsTHIS(const Instruction& i) {
  return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkDest(r0) &&
         InstALU::AsTHIS(i)->checkOp(OpCmp);
}

InstMOV* InstMOV::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) {
    return (InstMOV*)&i;
  }
  return nullptr;
}

// mov encodes with a zero first-operand register.
bool InstMOV::IsTHIS(const Instruction& i) {
  return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkOp1(r0) &&
         InstALU::AsTHIS(i)->checkOp(OpMov);
}
    485 
// Reinterpret this Operand2 as a register-form operand.
// NOTE(review): raw pointer type-pun; relies on Op2Reg having the same layout
// as Operand2 — confirm against the declarations in Assembler-arm.h.
Op2Reg Operand2::toOp2Reg() const { return *(Op2Reg*)this; }

// Reassemble the split 16-bit immediate of an encoded movw/movt.
// NOTE(review): this ctor sets invalid_ to the same 0xfff sentinel as the
// default ctor, and does not touch pad_ — presumably only decode() is
// meaningful on instruction-extracted immediates; confirm.
Imm16::Imm16(Instruction& inst)
    : lower_(inst.encode() & 0xfff),
      upper_(inst.encode() >> 16),
      invalid_(0xfff) {}

// Build an encodable 16-bit immediate from a plain value; asserts the value
// round-trips through the split encoding.
Imm16::Imm16(uint32_t imm)
    : lower_(imm & 0xfff), pad_(0), upper_((imm >> 12) & 0xf), invalid_(0) {
  MOZ_ASSERT(decode() == imm);
}

// Default-constructed immediates are marked invalid.
Imm16::Imm16() : invalid_(0xfff) {}
    499 
// Flush any pending pools/branches and mark the assembler finished. Must be
// called exactly once, before executableCopy().
void Assembler::finish() {
  flush();
  MOZ_ASSERT(!isFinished);
  isFinished = true;
}

// Append pre-encoded machine code bytes to the buffer.
bool Assembler::appendRawCode(const uint8_t* code, size_t numBytes) {
  flush();
  return m_buffer.appendRawCode(code, numBytes);
}

bool Assembler::reserve(size_t size) {
  // This buffer uses fixed-size chunks so there's no point in reserving
  // now vs. on-demand.
  return !oom();
}

// Move the assembled bytes into |bytes| (which must be empty beforehand).
bool Assembler::swapBuffer(wasm::Bytes& bytes) {
  // For now, specialize to the one use case. As long as wasm::Bytes is a
  // Vector, not a linked-list of chunks, there's not much we can do other
  // than copy.
  MOZ_ASSERT(bytes.empty());
  if (!bytes.resize(bytesNeeded())) {
    return false;
  }
  m_buffer.executableCopy(bytes.begin());
  return true;
}

// Copy the finished code into its final (executable) location.
void Assembler::executableCopy(uint8_t* buffer) {
  MOZ_ASSERT(isFinished);
  m_buffer.executableCopy(buffer);
}
    533 
    534 class RelocationIterator {
    535  CompactBufferReader reader_;
    536  // Offset in bytes.
    537  uint32_t offset_;
    538 
    539 public:
    540  explicit RelocationIterator(CompactBufferReader& reader) : reader_(reader) {}
    541 
    542  bool read() {
    543    if (!reader_.more()) {
    544      return false;
    545    }
    546    offset_ = reader_.readUnsigned();
    547    return true;
    548  }
    549 
    550  uint32_t offset() const { return offset_; }
    551 };
    552 
// Compute the absolute 32-bit target of the control-flow sequence starting at
// iter->cur(). Three shapes are recognized: a direct immediate branch, a
// movw/movt pair followed by a register branch, and a pool load (ldr).
// Crashes on anything else.
template <class Iter>
const uint32_t* Assembler::GetCF32Target(Iter* iter) {
  Instruction* inst1 = iter->cur();

  if (inst1->is<InstBranchImm>()) {
    // See if we have a simple case, b #offset.
    BOffImm imm;
    InstBranchImm* jumpB = inst1->as<InstBranchImm>();
    jumpB->extractImm(&imm);
    return imm.getDest(inst1)->raw();
  }

  if (inst1->is<InstMovW>()) {
    // See if we have the complex case:
    //  movw r_temp, #imm1
    //  movt r_temp, #imm2
    //  bx r_temp
    // OR
    //  movw r_temp, #imm1
    //  movt r_temp, #imm2
    //  str pc, [sp]
    //  bx r_temp

    Imm16 targ_bot;
    Imm16 targ_top;
    Register temp;

    // Extract both the temp register and the bottom immediate.
    InstMovW* bottom = inst1->as<InstMovW>();
    bottom->extractImm(&targ_bot);
    bottom->extractDest(&temp);

    // Extract the top part of the immediate.
    Instruction* inst2 = iter->next();
    MOZ_ASSERT(inst2->is<InstMovT>());
    InstMovT* top = inst2->as<InstMovT>();
    top->extractImm(&targ_top);

    // Make sure they are being loaded into the same register.
    MOZ_ASSERT(top->checkDest(temp));

    // Make sure we're branching to the same register.
#ifdef DEBUG
    // A toggled call sometimes has a NOP instead of a branch for the third
    // instruction. No way to assert that it's valid in that situation.
    Instruction* inst3 = iter->next();
    if (!inst3->is<InstNOP>()) {
      InstBranchReg* realBranch = nullptr;
      if (inst3->is<InstBranchReg>()) {
        realBranch = inst3->as<InstBranchReg>();
      } else {
        // The "str pc, [sp]" form: the branch is the fourth instruction.
        Instruction* inst4 = iter->next();
        realBranch = inst4->as<InstBranchReg>();
      }
      MOZ_ASSERT(realBranch->checkDest(temp));
    }
#endif

    // Reassemble the absolute address from the two 16-bit halves.
    uint32_t* dest = (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
    return dest;
  }

  if (inst1->is<InstLDR>()) {
    // Pool load: the target pointer is stored in the constant pool.
    return *(uint32_t**)inst1->as<InstLDR>()->dest();
  }

  MOZ_CRASH("unsupported branch relocation");
}
    621 
    622 uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
    623  InstructionIterator iter((Instruction*)instPtr);
    624  uintptr_t ret = (uintptr_t)GetPtr32Target(iter, nullptr, nullptr);
    625  return ret;
    626 }
    627 
// Decode the 32-bit pointer materialized by the sequence at |iter|, either a
// movw/movt pair (L_MOVWT) or a pool load (L_LDR). Optionally reports the
// destination register in |dest| and the sequence kind in |style|.
template <class Iter>
const uint32_t* Assembler::GetPtr32Target(Iter iter, Register* dest,
                                          RelocStyle* style) {
  Instruction* load1 = iter.cur();
  Instruction* load2 = iter.next();

  if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
    if (style) {
      *style = L_MOVWT;
    }

    // See if we have the complex case:
    //  movw r_temp, #imm1
    //  movt r_temp, #imm2

    Imm16 targ_bot;
    Imm16 targ_top;
    Register temp;

    // Extract both the temp register and the bottom immediate.
    InstMovW* bottom = load1->as<InstMovW>();
    bottom->extractImm(&targ_bot);
    bottom->extractDest(&temp);

    // Extract the top part of the immediate.
    InstMovT* top = load2->as<InstMovT>();
    top->extractImm(&targ_top);

    // Make sure they are being loaded into the same register.
    MOZ_ASSERT(top->checkDest(temp));

    if (dest) {
      *dest = temp;
    }

    // Reassemble the full pointer from the two 16-bit halves.
    uint32_t* value =
        (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
    return value;
  }

  if (load1->is<InstLDR>()) {
    if (style) {
      *style = L_LDR;
    }
    if (dest) {
      *dest = toRD(*load1);
    }
    // The pointer itself lives in the constant pool slot the ldr reads.
    return *(uint32_t**)load1->as<InstLDR>()->dest();
  }

  MOZ_CRASH("unsupported relocation");
}
    680 
    681 template const uint32_t* Assembler::GetPtr32Target<InstructionIterator>(
    682    InstructionIterator iter, Register* dest, RelocStyle* style);
    683 template const uint32_t* Assembler::GetPtr32Target<BufferInstructionIterator>(
    684    BufferInstructionIterator iter, Register* dest, RelocStyle* style);
    685 
    686 static JitCode* CodeFromJump(InstructionIterator* jump) {
    687  uint8_t* target = (uint8_t*)Assembler::GetCF32Target(jump);
    688  return JitCode::FromExecutable(target);
    689 }
    690 
// Trace (for GC) every JitCode target referenced by the jump-relocation
// table of |code|.
void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                     CompactBufferReader& reader) {
  RelocationIterator iter(reader);
  while (iter.read()) {
    InstructionIterator institer((Instruction*)(code->raw() + iter.offset()));
    JitCode* child = CodeFromJump(&institer);
    TraceManuallyBarrieredEdge(trc, &child, "rel32");
  }
}

// Trace one pointer embedded in the code at |iter| and, if the GC moved the
// pointee, patch the instruction sequence with the new address (making the
// code writable on first patch).
static void TraceOneDataRelocation(JSTracer* trc,
                                   mozilla::Maybe<AutoWritableJitCode>& awjc,
                                   JitCode* code, InstructionIterator iter) {
  Register dest;
  Assembler::RelocStyle rs;
  const void* prior = Assembler::GetPtr32Target(iter, &dest, &rs);
  void* ptr = const_cast<void*>(prior);

  // No barrier needed since these are constants.
  TraceManuallyBarrieredGenericPointerEdge(
      trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");

  if (ptr != prior) {
    // The pointee moved: make the code writable (once) and re-emit the
    // immediate with the updated address, preserving the original sequence
    // style (movw/movt vs pool load).
    if (awjc.isNothing()) {
      awjc.emplace(code);
    }

    MacroAssemblerARM::ma_mov_patch(Imm32(int32_t(ptr)), dest,
                                    Assembler::Always, rs, iter);
  }
}

/* static */
// Trace every pointer recorded in |code|'s data-relocation table.
void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
                                     CompactBufferReader& reader) {
  mozilla::Maybe<AutoWritableJitCode> awjc;
  while (reader.more()) {
    size_t offset = reader.readUnsigned();
    InstructionIterator iter((Instruction*)(code->raw() + offset));
    TraceOneDataRelocation(trc, awjc, code, iter);
  }
}
    733 
// Copy the accumulated jump-relocation bytes into |dest|; no-op when empty.
void Assembler::copyJumpRelocationTable(uint8_t* dest) {
  if (jumpRelocations_.length()) {
    memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
  }
}

// Copy the accumulated data-relocation bytes into |dest|; no-op when empty.
void Assembler::copyDataRelocationTable(uint8_t* dest) {
  if (dataRelocations_.length()) {
    memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
  }
}

// Resolve every recorded code label against the final code address.
void Assembler::processCodeLabels(uint8_t* rawCode) {
  for (const CodeLabel& label : codeLabels_) {
    Bind(rawCode, label);
  }
}

// Emit a raw 32-bit placeholder (-1) that Bind() later patches with an
// absolute code address. Must not be interrupted by a pool or nops.
void Assembler::writeCodePointer(CodeLabel* label) {
  m_buffer.assertNoPoolAndNoNops();
  BufferOffset off = writeInst(-1);
  label->patchAt()->bind(off.getOffset());
}

// Patch the code at |label.patchAt()| with the absolute address of
// |label.target()|, using the link mode recorded on the label.
void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
  auto mode = label.linkMode();
  size_t offset = label.patchAt().offset();
  size_t target = label.target().offset();

  if (mode == CodeLabel::MoveImmediate) {
    // Patch an immediate-materializing sequence: movw/movt when available,
    // otherwise a constant-pool entry.
    uint32_t imm = uint32_t(rawCode + target);
    Instruction* inst = (Instruction*)(rawCode + offset);
    if (ARMFlags::HasMOVWT()) {
      Assembler::PatchMovwt(inst, imm);
    } else {
      Assembler::WritePoolEntry(inst, Always, imm);
    }
  } else {
    // Raw pointer slot (see writeCodePointer): store the address directly.
    *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
  }
}
    775 
// Logically negate a condition code. The ARM condition field occupies bits
// 31..28, and each condition differs from its inverse only in bit 28, so a
// single XOR flips e.g. Equal <-> NotEqual.
Assembler::Condition Assembler::InvertCondition(Condition cond) {
  const uint32_t ConditionInversionBit = 0x10000000;
  return Condition(ConditionInversionBit ^ cond);
}

// Map a (possibly signed) comparison condition to its unsigned counterpart,
// leaving Zero/NonZero untouched.
Assembler::Condition Assembler::UnsignedCondition(Condition cond) {
  switch (cond) {
    case Zero:
    case NonZero:
      return cond;
    case LessThan:
    case Below:
      return Below;
    case LessThanOrEqual:
    case BelowOrEqual:
      return BelowOrEqual;
    case GreaterThan:
    case Above:
      return Above;
    case AboveOrEqual:
    case GreaterThanOrEqual:
      return AboveOrEqual;
    default:
      MOZ_CRASH("unexpected condition");
  }
}

// Strip the "or equal" part from an ordering condition (e.g.
// LessThanOrEqual -> LessThan), preserving signedness.
Assembler::Condition Assembler::ConditionWithoutEqual(Condition cond) {
  switch (cond) {
    case LessThan:
    case LessThanOrEqual:
      return LessThan;
    case Below:
    case BelowOrEqual:
      return Below;
    case GreaterThan:
    case GreaterThanOrEqual:
      return GreaterThan;
    case Above:
    case AboveOrEqual:
      return Above;
    default:
      MOZ_CRASH("unexpected condition");
  }
}

// Same single-bit inversion as the integer-condition overload above.
Assembler::DoubleCondition Assembler::InvertCondition(DoubleCondition cond) {
  const uint32_t ConditionInversionBit = 0x10000000;
  return DoubleCondition(ConditionInversionBit ^ cond);
}
    826 
// Try to express |imm| as the OR of two ARM "imm8m" immediates (an 8-bit
// value rotated right by an even amount), so the constant can be built with
// two data-processing instructions. Returns a default-constructed (invalid)
// TwoImm8mData when no such decomposition exists or when a single imm8m
// would suffice.
Imm8::TwoImm8mData Imm8::EncodeTwoImms(uint32_t imm) {
  // In the ideal case, we are looking for a number that (in binary) looks
  // like:
  //   0b((00)*)n_1((00)*)n_2((00)*)
  //      left  n1   mid  n2
  //   where both n_1 and n_2 fit into 8 bits.
  // Since this is being done with rotates, we also need to handle the case
  // that one of these numbers is in fact split between the left and right
  // sides, in which case the constant will look like:
  //   0bn_1a((00)*)n_2((00)*)n_1b
  //     n1a  mid  n2   rgh    n1b
  // Also remember, values are rotated by multiples of two, and left, mid or
  // right can have length zero.
  uint32_t imm1, imm2;
  // & 0x1E keeps the shift amounts even, as required by the rotate encoding.
  int left = CountLeadingZeroes32(imm) & 0x1E;
  uint32_t no_n1 = imm & ~(0xff << (24 - left));

  // Not technically needed: this case only happens if we can encode as a
  // single imm8m. There is a perfectly reasonable encoding in this case, but
  // we shouldn't encourage people to do things like this.
  if (no_n1 == 0) {
    return TwoImm8mData();
  }

  int mid = CountLeadingZeroes32(no_n1) & 0x1E;
  uint32_t no_n2 =
      no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));

  if (no_n2 == 0) {
    // We hit the easy case, no wraparound.
    // Note: a single constant *may* look like this.
    int imm1shift = left + 8;
    int imm2shift = mid + 8;
    imm1 = (imm >> (32 - imm1shift)) & 0xff;
    if (imm2shift >= 32) {
      imm2shift = 0;
      // This assert does not always hold, in fact, this would lead to
      // some incredibly subtle bugs.
      // assert((imm & 0xff) == no_n1);
      imm2 = no_n1;
    } else {
      imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
      MOZ_ASSERT(((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) == imm2);
    }
    MOZ_ASSERT((imm1shift & 0x1) == 0);
    MOZ_ASSERT((imm2shift & 0x1) == 0);
    // The rotate field encodes (rotation / 2), hence the >> 1.
    return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                        datastore::Imm8mData(imm2, imm2shift >> 1));
  }

  // Either it wraps, or it does not fit. If we initially chopped off more
  // than 8 bits, then it won't fit.
  if (left >= 8) {
    return TwoImm8mData();
  }

  int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
  // All remaining set bits *must* fit into the lower 8 bits.
  // The right == 8 case should be handled by the previous case.
  if (right > 8) {
    return TwoImm8mData();
  }

  // Make sure the initial bits that we removed for no_n1 fit into the
  // 8-(32-right) leftmost bits.
  if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) {
    // BUT we may have removed more bits than we needed to for no_n1
    // 0x04104001 e.g. we can encode 0x104 with a single op, then 0x04000001
    // with a second, but we try to encode 0x0410000 and find that we need a
    // second op for 0x4000, and 0x1 cannot be included in the encoding of
    // 0x04100000.
    no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right)));
    mid = CountLeadingZeroes32(no_n1) & 30;
    no_n2 = no_n1 & ~((0xff << ((24 - mid) & 31)) | 0xff >> ((8 + mid) & 31));
    if (no_n2 != 0) {
      return TwoImm8mData();
    }
  }

  // Now assemble all of this information into a two coherent constants it is
  // a rotate right from the lower 8 bits.
  int imm1shift = 8 - right;
  imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
  MOZ_ASSERT((imm1shift & ~0x1e) == 0);
  // left + 8 + mid is the position of the leftmost bit of n_2.
  // We needed to rotate 0x000000ab right by 8 in order to get 0xab000000,
  // then shift again by the leftmost bit in order to get the constant that we
  // care about.
  int imm2shift = mid + 8;
  imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
  MOZ_ASSERT((imm1shift & 0x1) == 0);
  MOZ_ASSERT((imm2shift & 0x1) == 0);
  return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                      datastore::Imm8mData(imm2, imm2shift >> 1));
}
    922 
    923 ALUOp jit::ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm,
    924                  Register* negDest) {
    925  // Find an alternate ALUOp to get the job done, and use a different imm.
    926  *negDest = dest;
    927  switch (op) {
    928    case OpMov:
    929      *imm = Imm32(~imm->value);
    930      return OpMvn;
    931    case OpMvn:
    932      *imm = Imm32(~imm->value);
    933      return OpMov;
    934    case OpAnd:
    935      *imm = Imm32(~imm->value);
    936      return OpBic;
    937    case OpBic:
    938      *imm = Imm32(~imm->value);
    939      return OpAnd;
    940    case OpAdd:
    941      *imm = Imm32(-imm->value);
    942      return OpSub;
    943    case OpSub:
    944      *imm = Imm32(-imm->value);
    945      return OpAdd;
    946    case OpCmp:
    947      *imm = Imm32(-imm->value);
    948      return OpCmn;
    949    case OpCmn:
    950      *imm = Imm32(-imm->value);
    951      return OpCmp;
    952    case OpTst:
    953      MOZ_ASSERT(dest == InvalidReg);
    954      *imm = Imm32(~imm->value);
    955      *negDest = scratch;
    956      return OpBic;
    957      // orr has orn on thumb2 only.
    958    default:
    959      return OpInvalid;
    960  }
    961 }
    962 
    963 bool jit::can_dbl(ALUOp op) {
    964  // Some instructions can't be processed as two separate instructions such as
    965  // and, and possibly add (when we're setting ccodes). There is also some
    966  // hilarity with *reading* condition codes. For example, adc dest, src1,
    967  // 0xfff; (add with carry) can be split up into adc dest, src1, 0xf00; add
    968  // dest, dest, 0xff, since "reading" the condition code increments the
    969  // result by one conditionally, that only needs to be done on one of the two
    970  // instructions.
    971  switch (op) {
    972    case OpBic:
    973    case OpAdd:
    974    case OpSub:
    975    case OpEor:
    976    case OpOrr:
    977      return true;
    978    default:
    979      return false;
    980  }
    981 }
    982 
    983 bool jit::condsAreSafe(ALUOp op) {
    984  // Even when we are setting condition codes, sometimes we can get away with
    985  // splitting an operation into two. For example, if our immediate is
    986  // 0x00ff00ff, and the operation is eors we can split this in half, since x
    987  // ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes exactly
    988  // the same as x ^ 0x00ff00ff. However, if the operation were adds, we
    989  // cannot split this in half. If the source on the add is 0xfff00ff0, the
    990  // result sholud be 0xef10ef, but do we set the overflow bit or not?
    991  // Depending on which half is performed first (0x00ff0000 or 0x000000ff) the
    992  // V bit will be set differently, and *not* updating the V bit would be
    993  // wrong. Theoretically, the following should work:
    994  //  adds r0, r1, 0x00ff0000;
    995  //  addsvs r0, r1, 0x000000ff;
    996  //  addvc r0, r1, 0x000000ff;
    997  // But this is 3 instructions, and at that point, we might as well use
    998  // something else.
    999  switch (op) {
   1000    case OpBic:
   1001    case OpOrr:
   1002    case OpEor:
   1003      return true;
   1004    default:
   1005      return false;
   1006  }
   1007 }
   1008 
   1009 ALUOp jit::getDestVariant(ALUOp op) {
   1010  // All of the compare operations are dest-less variants of a standard
   1011  // operation. Given the dest-less variant, return the dest-ful variant.
   1012  switch (op) {
   1013    case OpCmp:
   1014      return OpSub;
   1015    case OpCmn:
   1016      return OpAdd;
   1017    case OpTst:
   1018      return OpAnd;
   1019    case OpTeq:
   1020      return OpEor;
   1021    default:
   1022      return op;
   1023  }
   1024 }
   1025 
   1026 O2RegImmShift jit::O2Reg(Register r) { return O2RegImmShift(r, LSL, 0); }
   1027 
   1028 O2RegImmShift jit::lsl(Register r, int amt) {
   1029  MOZ_ASSERT(0 <= amt && amt <= 31);
   1030  return O2RegImmShift(r, LSL, amt);
   1031 }
   1032 
   1033 O2RegImmShift jit::lsr(Register r, int amt) {
   1034  MOZ_ASSERT(1 <= amt && amt <= 32);
   1035  return O2RegImmShift(r, LSR, amt);
   1036 }
   1037 
   1038 O2RegImmShift jit::ror(Register r, int amt) {
   1039  MOZ_ASSERT(1 <= amt && amt <= 31);
   1040  return O2RegImmShift(r, ROR, amt);
   1041 }
   1042 O2RegImmShift jit::rol(Register r, int amt) {
   1043  MOZ_ASSERT(1 <= amt && amt <= 31);
   1044  return O2RegImmShift(r, ROR, 32 - amt);
   1045 }
   1046 
   1047 O2RegImmShift jit::asr(Register r, int amt) {
   1048  MOZ_ASSERT(1 <= amt && amt <= 32);
   1049  return O2RegImmShift(r, ASR, amt);
   1050 }
   1051 
// Operand-2 builders: a register shifted by the value held in another
// register.

O2RegRegShift jit::lsl(Register r, Register amt) {
  return O2RegRegShift(r, LSL, amt);
}

O2RegRegShift jit::lsr(Register r, Register amt) {
  return O2RegRegShift(r, LSR, amt);
}

O2RegRegShift jit::ror(Register r, Register amt) {
  return O2RegRegShift(r, ROR, amt);
}

O2RegRegShift jit::asr(Register r, Register amt) {
  return O2RegRegShift(r, ASR, amt);
}
   1067 
// Shared encoder used to test whether a double is representable as an 8-bit
// VFP immediate (see the VFPImm constructor below).
static js::jit::DoubleEncoder doubleEncoder;

/* static */
// The double 1.0 (high word 0x3FF00000, low word zero).
MOZ_RUNINIT const js::jit::VFPImm js::jit::VFPImm::One(0x3FF00000);

// Build a VFP immediate from the high 32 bits of a double. data_ is left as
// -1 (invalid) when the value has no vmov-immediate encoding.
js::jit::VFPImm::VFPImm(uint32_t top) {
  data_ = -1;
  datastore::Imm8VFPImmData tmp;
  if (doubleEncoder.lookup(top, &tmp)) {
    data_ = tmp.encode();
  }
}

// Extract the 24-bit signed branch offset field from a branch instruction.
BOffImm::BOffImm(const Instruction& inst) : data_(inst.encode() & 0x00ffffff) {}

Instruction* BOffImm::getDest(Instruction* src) const {
  // TODO: It is probably worthwhile to verify that src is actually a branch.
  // NOTE: This does not explicitly shift the offset of the destination left by
  // 2, since it is indexing into an array of instruction sized objects.
  // The <<8 then >>8 sign-extends the 24-bit field; the +2 instructions
  // (+8 bytes) match the ARM convention that pc reads 8 bytes ahead of the
  // branch.
  return &src[((int32_t(data_) << 8) >> 8) + 2];
}

// Lookup table for the VFP immediate encoder; generated contents live in the
// .tbl file.
const js::jit::DoubleEncoder::DoubleEntry js::jit::DoubleEncoder::table[256] = {
#include "jit/arm/DoubleEntryTable.tbl"
};
   1093 
// VFPRegister implementation

// Return the double register that aliases this register. A single/int
// register s<2n> or s<2n+1> overlays double register d<n>.
VFPRegister VFPRegister::doubleOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  MOZ_ASSERT(which == 0);
  if (kind != Double) {
    return VFPRegister(code_ >> 1, Double);
  }
  return *this;
}

// Return the |which|th (0 or 1) single-precision register overlaying this
// register.
VFPRegister VFPRegister::singleOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  if (kind == Double) {
    // There are no corresponding float registers for d16-d31.
    MOZ_ASSERT(code_ < 16);
    MOZ_ASSERT(which < 2);
    return VFPRegister((code_ << 1) + which, Single);
  }
  MOZ_ASSERT(which == 0);
  return VFPRegister(code_, Single);
}

static_assert(
    FloatRegisters::TotalDouble <= 16,
    "We assume that every Double register also has an Integer personality");

// Same overlay mapping as singleOverlay, but the result is given a signed
// integer personality.
VFPRegister VFPRegister::sintOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  if (kind == Double) {
    // There are no corresponding float registers for d16-d31.
    MOZ_ASSERT(code_ < 16);
    MOZ_ASSERT(which < 2);
    return VFPRegister((code_ << 1) + which, Int);
  }
  MOZ_ASSERT(which == 0);
  return VFPRegister(code_, Int);
}

// Same overlay mapping, with an unsigned integer personality.
VFPRegister VFPRegister::uintOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  if (kind == Double) {
    // There are no corresponding float registers for d16-d31.
    MOZ_ASSERT(code_ < 16);
    MOZ_ASSERT(which < 2);
    return VFPRegister((code_ << 1) + which, UInt);
  }
  MOZ_ASSERT(which == 0);
  return VFPRegister(code_, UInt);
}
   1141 
bool Assembler::oom() const {
  // Propagate OOM from every buffer this assembler writes into.
  return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
         dataRelocations_.oom();
}

// Size of the instruction stream, in bytes. Including pools. This function
// expects all pools that need to be placed have been placed. If they haven't
// then we need to go and flush the pools :(
size_t Assembler::size() const { return m_buffer.size(); }
// Size of the relocation table, in bytes.
size_t Assembler::jumpRelocationTableBytes() const {
  return jumpRelocations_.length();
}
size_t Assembler::dataRelocationTableBytes() const {
  return dataRelocations_.length();
}

// Size of the data table, in bytes.
size_t Assembler::bytesNeeded() const {
  // Total footprint: code (including pools) plus both relocation tables.
  return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
}
   1163 
// Allocate memory for a branch instruction, it will be overwritten
// subsequently and should not be disassembled.

BufferOffset Assembler::allocBranchInst() {
  // Reserve one instruction slot; a NOP placeholder keeps the buffer valid
  // until the real branch is patched in.
  return m_buffer.putInt(Always | InstNOP::NopInst);
}

// Write a raw instruction word to a known location (used by the *_patch
// methods below).
void Assembler::WriteInstStatic(uint32_t x, uint32_t* dest) {
  MOZ_ASSERT(dest != nullptr);
  *dest = x;
}

void Assembler::haltingAlign(int alignment) {
  // HLT with payload 0xBAAD
  m_buffer.align(alignment, 0xE1000070 | (0xBAA << 8) | 0xD);
}

// Pad with NOPs up to the requested alignment.
void Assembler::nopAlign(int alignment) { m_buffer.align(alignment); }

BufferOffset Assembler::as_nop() { return writeInst(0xe320f000); }

// Assemble a data-processing (ALU) instruction word from its fields. An
// InvalidReg for dest/src1 leaves the corresponding field zero, as required
// by the dest-less (cmp/tst/...) and src-less (mov/mvn) forms.
static uint32_t EncodeAlu(Register dest, Register src1, Operand2 op2, ALUOp op,
                          SBit s, Assembler::Condition c) {
  return (int)op | (int)s | (int)c | op2.encode() |
         ((dest == InvalidReg) ? 0 : RD(dest)) |
         ((src1 == InvalidReg) ? 0 : RN(src1));
}

BufferOffset Assembler::as_alu(Register dest, Register src1, Operand2 op2,
                               ALUOp op, SBit s, Condition c) {
  return writeInst(EncodeAlu(dest, src1, op2, op, s, c));
}

BufferOffset Assembler::as_mov(Register dest, Operand2 op2, SBit s,
                               Condition c) {
  return as_alu(dest, InvalidReg, op2, OpMov, s, c);
}

/* static */
// Overwrite the instruction at |pos| with an ALU instruction in place.
void Assembler::as_alu_patch(Register dest, Register src1, Operand2 op2,
                             ALUOp op, SBit s, Condition c, uint32_t* pos) {
  WriteInstStatic(EncodeAlu(dest, src1, op2, op, s, c), pos);
}

/* static */
void Assembler::as_mov_patch(Register dest, Operand2 op2, SBit s, Condition c,
                             uint32_t* pos) {
  as_alu_patch(dest, InvalidReg, op2, OpMov, s, c, pos);
}

BufferOffset Assembler::as_mvn(Register dest, Operand2 op2, SBit s,
                               Condition c) {
  return as_alu(dest, InvalidReg, op2, OpMvn, s, c);
}
   1218 
// Logical operations.
BufferOffset Assembler::as_and(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpAnd, s, c);
}
// bic: bitwise AND with the complement of op2.
BufferOffset Assembler::as_bic(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpBic, s, c);
}
// eor: bitwise exclusive-or.
BufferOffset Assembler::as_eor(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpEor, s, c);
}
// orr: bitwise inclusive-or.
BufferOffset Assembler::as_orr(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpOrr, s, c);
}

// Reverse byte operations. The binary literals below are the fixed encodings
// of rev/rev16/revsh with zeroed register fields.
// rev: reverse the four bytes of a word.
BufferOffset Assembler::as_rev(Register dest, Register src, Condition c) {
  return writeInst((int)c | 0b0000'0110'1011'1111'0000'1111'0011'0000 |
                   RD(dest) | src.code());
}
// rev16: reverse the bytes within each halfword.
BufferOffset Assembler::as_rev16(Register dest, Register src, Condition c) {
  return writeInst((int)c | 0b0000'0110'1011'1111'0000'1111'1011'0000 |
                   RD(dest) | src.code());
}
// revsh: byte-reverse the low halfword and sign-extend the result.
BufferOffset Assembler::as_revsh(Register dest, Register src, Condition c) {
  return writeInst((int)c | 0b0000'0110'1111'1111'0000'1111'1011'0000 |
                   RD(dest) | src.code());
}
   1250 
// Mathematical operations.
BufferOffset Assembler::as_adc(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpAdc, s, c);
}
BufferOffset Assembler::as_add(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpAdd, s, c);
}
BufferOffset Assembler::as_sbc(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpSbc, s, c);
}
BufferOffset Assembler::as_sub(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpSub, s, c);
}
// rsb/rsc: reverse subtract (op2 - src1), without/with carry.
BufferOffset Assembler::as_rsb(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpRsb, s, c);
}
BufferOffset Assembler::as_rsc(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpRsc, s, c);
}

// Test operations. These have no destination register and always set the
// condition codes (SetCC).
BufferOffset Assembler::as_cmn(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpCmn, SetCC, c);
}
BufferOffset Assembler::as_cmp(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpCmp, SetCC, c);
}
BufferOffset Assembler::as_teq(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpTeq, SetCC, c);
}
BufferOffset Assembler::as_tst(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpTst, SetCC, c);
}
   1290 
// pc (0b1111) in the Rn field of an extend instruction selects the
// extend-only form (sxtb etc.) rather than the extend-and-add form --
// hence "NoAddend".
static constexpr Register NoAddend{Registers::pc};

// Base opcode bits shared by the sign/zero-extend instructions.
// NOTE: this variable shares its name with the enum declared below; the
// variable hides the enum's tag, which is never referred to directly.
static const int SignExtend = 0x06000070;

enum SignExtend {
  SxSxtb = 10 << 20,
  SxSxth = 11 << 20,
  SxUxtb = 14 << 20,
  SxUxth = 15 << 20
};

// Sign extension operations. |rotate| (0-3) selects which byte/halfword is
// extended: the source is rotated right by 8 * rotate bits first.
BufferOffset Assembler::as_sxtb(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxSxtb | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_sxth(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxSxth | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_uxtb(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxUxtb | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_uxth(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxUxth | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
   1323 
// movw: load a 16-bit immediate into the low half of dest, zeroing the top.
static uint32_t EncodeMovW(Register dest, Imm16 imm, Assembler::Condition c) {
  MOZ_ASSERT(ARMFlags::HasMOVWT());
  return 0x03000000 | c | imm.encode() | RD(dest);
}

// movt: load a 16-bit immediate into the top half of dest, keeping the low.
static uint32_t EncodeMovT(Register dest, Imm16 imm, Assembler::Condition c) {
  MOZ_ASSERT(ARMFlags::HasMOVWT());
  return 0x03400000 | c | imm.encode() | RD(dest);
}

// Not quite ALU worthy, but these are useful none the less. These also have
// the issue of being formatted completely differently from the standard ALU
// operations.
BufferOffset Assembler::as_movw(Register dest, Imm16 imm, Condition c) {
  return writeInst(EncodeMovW(dest, imm, c));
}

/* static */
void Assembler::as_movw_patch(Register dest, Imm16 imm, Condition c,
                              Instruction* pos) {
  WriteInstStatic(EncodeMovW(dest, imm, c), (uint32_t*)pos);
}

BufferOffset Assembler::as_movt(Register dest, Imm16 imm, Condition c) {
  return writeInst(EncodeMovT(dest, imm, c));
}

/* static */
void Assembler::as_movt_patch(Register dest, Imm16 imm, Condition c,
                              Instruction* pos) {
  WriteInstStatic(EncodeMovT(dest, imm, c), (uint32_t*)pos);
}

// Patch an adjacent movw/movt pair in place so that it materializes the
// 32-bit value |imm|, preserving the pair's destination register and
// condition code.
void Assembler::PatchMovwt(Instruction* addr, uint32_t imm) {
  InstructionIterator iter(addr);
  Instruction* movw = iter.cur();
  MOZ_ASSERT(movw->is<InstMovW>());
  Instruction* movt = iter.next();
  MOZ_ASSERT(movt->is<InstMovT>());

  // Both instructions must target the same register under the same condition.
  Register dest = toRD(*movw);
  Condition c = movw->extractCond();
  MOZ_ASSERT(toRD(*movt) == dest && movt->extractCond() == c);

  Assembler::WriteInstStatic(EncodeMovW(dest, Imm16(imm & 0xffff), c),
                             (uint32_t*)movw);
  Assembler::WriteInstStatic(EncodeMovT(dest, Imm16(imm >> 16 & 0xffff), c),
                             (uint32_t*)movt);
}
   1373 
// Fixed tag bits (bits 7-4 = 1001) common to the multiply-family encodings.
static const int mull_tag = 0x90;

// Generic multiply emitter. Note the field placement: dhi goes in the Rn
// field and dlo in the Rd field; the non-long forms pass InvalidReg for dlo
// (maybeRD presumably encodes nothing for InvalidReg -- see as_mul below).
BufferOffset Assembler::as_genmul(Register dhi, Register dlo, Register rm,
                                  Register rn, MULOp op, SBit s, Condition c) {
  return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | s | c |
                   mull_tag);
}
BufferOffset Assembler::as_mul(Register dest, Register src1, Register src2,
                               SBit s, Condition c) {
  return as_genmul(dest, InvalidReg, src1, src2, OpmMul, s, c);
}
// mla: multiply-accumulate (dest = acc + src1 * src2).
BufferOffset Assembler::as_mla(Register dest, Register acc, Register src1,
                               Register src2, SBit s, Condition c) {
  return as_genmul(dest, acc, src1, src2, OpmMla, s, c);
}
BufferOffset Assembler::as_umaal(Register destHI, Register destLO,
                                 Register src1, Register src2, Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmUmaal, LeaveCC, c);
}
// mls: multiply-subtract; never sets condition codes.
BufferOffset Assembler::as_mls(Register dest, Register acc, Register src1,
                               Register src2, Condition c) {
  return as_genmul(dest, acc, src1, src2, OpmMls, LeaveCC, c);
}

// 64-bit results: the high word goes to destHI, the low word to destLO.
BufferOffset Assembler::as_umull(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmUmull, s, c);
}

BufferOffset Assembler::as_umlal(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmUmlal, s, c);
}

BufferOffset Assembler::as_smull(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmSmull, s, c);
}

BufferOffset Assembler::as_smlal(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmSmlal, s, c);
}

// sdiv/udiv: signed/unsigned integer divide, rd = rn / rm. Note that in
// this encoding the destination lives in the Rn field.
BufferOffset Assembler::as_sdiv(Register rd, Register rn, Register rm,
                                Condition c) {
  return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
}

BufferOffset Assembler::as_udiv(Register rd, Register rn, Register rm,
                                Condition c) {
  return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code());
}

// clz: count leading zeroes. pc operands are not permitted.
BufferOffset Assembler::as_clz(Register dest, Register src, Condition c) {
  MOZ_ASSERT(src != pc && dest != pc);
  return writeInst(RD(dest) | src.code() | c | 0x016f0f10);
}
   1436 
// Data transfer instructions: ldr, str, ldrb, strb. Using an int to
// differentiate between 8 bits and 32 bits is overkill, but meh.

// Assemble a single data-transfer instruction word. Writeback modes must not
// use pc or the transferred register as the base. 0x00400000 is the B (byte)
// bit selecting the 8-bit form.
static uint32_t EncodeDtr(LoadStore ls, int size, Index mode, Register rt,
                          DTRAddr addr, Assembler::Condition c) {
  MOZ_ASSERT(mode == Offset || (rt != addr.getBase() && pc != addr.getBase()));
  MOZ_ASSERT(size == 32 || size == 8);
  return 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c | RT(rt) |
         addr.encode();
}

BufferOffset Assembler::as_dtr(LoadStore ls, int size, Index mode, Register rt,
                               DTRAddr addr, Condition c) {
  return writeInst(EncodeDtr(ls, size, mode, rt, addr, c));
}

/* static */
// Overwrite the instruction at |dest| with a data-transfer instruction.
void Assembler::as_dtr_patch(LoadStore ls, int size, Index mode, Register rt,
                             DTRAddr addr, Condition c, uint32_t* dest) {
  WriteInstStatic(EncodeDtr(ls, size, mode, rt, addr, c), dest);
}
   1458 
// A PoolHintData is a 32-bit word written into the instruction stream as a
// placeholder for a not-yet-patched constant-pool load. It records
// everything needed to later rewrite it into a real ldr/vldr (or branch):
// the pool entry index, the condition code, the kind of load, and the
// destination register.
class PoolHintData {
 public:
  enum LoadType {
    // Set 0 to bogus, since that is the value most likely to be
    // accidentally left somewhere.
    PoolBOGUS = 0,
    PoolDTR = 1,
    PoolBranch = 2,
    PoolVDTR = 3
  };

 private:
  uint32_t index_ : 16;    // Index of the entry within the constant pool.
  uint32_t cond_ : 4;      // Condition code (top four bits of a Condition).
  uint32_t loadType_ : 2;  // One of LoadType above.
  uint32_t destReg_ : 5;   // Destination register id.
  uint32_t destType_ : 1;  // For VFP loads: 1 = double, 0 = single.
  uint32_t ONES : 4;       // Always ExpectedOnes for a valid hint.

  static const uint32_t ExpectedOnes = 0xfu;

 public:
  // Initialize a hint targeting a core register.
  void init(uint32_t index, Assembler::Condition cond, LoadType lt,
            Register destReg) {
    index_ = index;
    MOZ_ASSERT(index_ == index);  // Ensure the index fits in 16 bits.
    cond_ = cond >> 28;
    MOZ_ASSERT(cond_ == cond >> 28);
    loadType_ = lt;
    ONES = ExpectedOnes;
    destReg_ = destReg.code();
    destType_ = 0;
  }
  // Initialize a hint targeting a VFP register.
  void init(uint32_t index, Assembler::Condition cond, LoadType lt,
            const VFPRegister& destReg) {
    MOZ_ASSERT(destReg.isFloat());
    index_ = index;
    MOZ_ASSERT(index_ == index);
    cond_ = cond >> 28;
    MOZ_ASSERT(cond_ == cond >> 28);
    loadType_ = lt;
    ONES = ExpectedOnes;
    destReg_ = destReg.id();
    destType_ = destReg.isDouble();
  }
  Assembler::Condition getCond() const {
    return Assembler::Condition(cond_ << 28);
  }

  Register getReg() const { return Register::FromCode(destReg_); }
  VFPRegister getVFPReg() const {
    VFPRegister r = VFPRegister(
        destReg_, destType_ ? VFPRegister::Double : VFPRegister::Single);
    return r;
  }

  int32_t getIndex() const { return index_; }
  // Update the pool index once the entry's final position is known.
  void setIndex(uint32_t index) {
    MOZ_ASSERT(ONES == ExpectedOnes && loadType_ != PoolBOGUS);
    index_ = index;
    MOZ_ASSERT(index_ == index);
  }

  LoadType getLoadType() const {
    // If this *was* a PoolBranch, but the branch has already been bound
    // then this isn't going to look like a real poolhintdata, but we still
    // want to lie about it so everyone knows it *used* to be a branch.
    if (ONES != ExpectedOnes) {
      return PoolHintData::PoolBranch;
    }
    return static_cast<LoadType>(loadType_);
  }

  bool isValidPoolHint() const {
    // Most instructions cannot have a condition that is 0xf. Notable
    // exceptions are blx and the entire NEON instruction set. For the
    // purposes of pool loads, and possibly patched branches, the possible
    // instructions are ldr and b, neither of which can have a condition
    // code of 0xf.
    return ONES == ExpectedOnes;
  }
};

// Type-punning helper for reading/writing a PoolHintData as a raw
// instruction word.
union PoolHintPun {
  PoolHintData phd;
  uint32_t raw;
};
   1546 
// Handles all of the other integral data transferring functions: ldrsb, ldrsh,
// ldrd, etc. The size is given in bits.
BufferOffset Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned,
                                  Index mode, Register rt, EDtrAddr addr,
                                  Condition c) {
  // extra_bits1 is placed at bit 20 (the L bit) and extra_bits2 at bits 6-5
  // (the S/H bits); together with the fixed 0x90 tag they select the
  // signed/halfword/doubleword variant.
  int extra_bits2 = 0;
  int extra_bits1 = 0;
  switch (size) {
    case 8:
      // Only signed byte loads come through here; plain byte accesses use
      // the regular ldrb/strb encoding (as_dtr).
      MOZ_ASSERT(IsSigned);
      MOZ_ASSERT(ls != IsStore);
      extra_bits1 = 0x1;
      extra_bits2 = 0x2;
      break;
    case 16:
      // 'case 32' doesn't need to be handled, it is handled by the default
      // ldr/str.
      extra_bits2 = 0x01;
      extra_bits1 = (ls == IsStore) ? 0 : 1;
      if (IsSigned) {
        MOZ_ASSERT(ls != IsStore);
        extra_bits2 |= 0x2;
      }
      break;
    case 64:
      // ldrd/strd.
      extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
      extra_bits1 = 0;
      break;
    default:
      MOZ_CRASH("unexpected size in as_extdtr");
  }
  return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 | addr.encode() |
                   RT(rt) | mode | c);
}

// Block data transfer (ldm/stm). |mask| is the register-list bitmask.
BufferOffset Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
                               DTMMode mode, DTMWriteBack wb, Condition c) {
  return writeInst(0x08000000 | RN(rn) | ls | mode | mask | c | wb);
}
   1586 
// Reserve |numInst| instruction slots (currently always one) plus
// |numPoolEntries| words of pool data, writing the pool hint word |php| as
// the placeholder instruction. |doc| and |loadToPC| are only consumed by the
// disassembler spew.
BufferOffset Assembler::allocLiteralLoadEntry(
    size_t numInst, unsigned numPoolEntries, PoolHintPun& php, uint8_t* data,
    const LiteralDoc& doc, ARMBuffer::PoolEntry* pe, bool loadToPC) {
  uint8_t* inst = (uint8_t*)&php.raw;

  MOZ_ASSERT(inst);
  MOZ_ASSERT(numInst == 1);  // Or fix the disassembly

  BufferOffset offs =
      m_buffer.allocEntry(numInst, numPoolEntries, inst, data, pe);
  // allocEntry returns an unassigned offset on OOM; record that.
  propagateOOM(offs.assigned());
#ifdef JS_DISASM_ARM
  Instruction* instruction = m_buffer.getInstOrNull(offs);
  if (instruction) {
    spewLiteralLoad(php, loadToPC, instruction, doc);
  }
#endif
  return offs;
}
   1606 
// This is also used for instructions that might be resolved into branches,
// or might not.  If dest==pc then it is effectively a branch.

BufferOffset Assembler::as_Imm32Pool(Register dest, uint32_t value,
                                     Condition c) {
  // Emit a pool hint that will be patched into a pc-relative ldr once the
  // pool is laid out; |value| itself is stored in the pool.
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolDTR, dest);
  BufferOffset offs = allocLiteralLoadEntry(
      1, 1, php, (uint8_t*)&value, LiteralDoc(value), nullptr, dest == pc);
  return offs;
}

/* static */
// Store |data| into the pool slot referenced by the (already patched) ldr
// at |addr|.
void Assembler::WritePoolEntry(Instruction* addr, Condition c, uint32_t data) {
  MOZ_ASSERT(addr->is<InstLDR>());
  *addr->as<InstLDR>()->dest() = data;
  MOZ_ASSERT(addr->extractCond() == c);
}

// Load a double constant via the pool (two pool words; patched to a VFP
// load later).
BufferOffset Assembler::as_FImm64Pool(VFPRegister dest, double d, Condition c) {
  MOZ_ASSERT(dest.isDouble());
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
  return allocLiteralLoadEntry(1, 2, php, (uint8_t*)&d, LiteralDoc(d));
}

BufferOffset Assembler::as_FImm32Pool(VFPRegister dest, float f, Condition c) {
  // Insert floats into the double pool as they have the same limitations on
  // immediate offset. This wastes 4 bytes padding per float. An alternative
  // would be to have a separate pool for floats.
  MOZ_ASSERT(dest.isSingle());
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
  return allocLiteralLoadEntry(1, 1, php, (uint8_t*)&f, LiteralDoc(f));
}
   1642 
// Pool callbacks stuff:
// Rewrite the index field of the pool hint word at |load_| once the entry's
// final position within the pool is known.
void Assembler::InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
  uint32_t* load = (uint32_t*)load_;
  PoolHintPun php;
  php.raw = *load;
  php.phd.setIndex(index);
  *load = php.raw;
}

// patchConstantPoolLoad takes the address of the instruction that wants to be
// patched, and the address of the start of the constant pool, and figures
// things out from there.
void Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
  PoolHintData data = *(PoolHintData*)loadAddr;
  uint32_t* instAddr = (uint32_t*)loadAddr;
  // Byte distance from the load to the start of the pool. The pc-relative
  // immediate below additionally folds in the entry's index (4 bytes per
  // entry) and subtracts 8 for the implicit pc-ahead offset.
  int offset = (char*)constPoolAddr - (char*)loadAddr;
  switch (data.getLoadType()) {
    case PoolHintData::PoolBOGUS:
      MOZ_CRASH("bogus load type!");
    case PoolHintData::PoolDTR:
      // Integer load: rewrite the hint as ldr <reg>, [pc, #imm].
      Assembler::as_dtr_patch(
          IsLoad, 32, Offset, data.getReg(),
          DTRAddr(pc, DtrOffImm(offset + 4 * data.getIndex() - 8)),
          data.getCond(), instAddr);
      break;
    case PoolHintData::PoolBranch:
      // Either this used to be a poolBranch, and the label was already bound,
      // so it was replaced with a real branch, or this may happen in the
      // future. If this is going to happen in the future, then the actual
      // bits that are written here don't matter (except the condition code,
      // since that is always preserved across patchings) but if it does not
      // get bound later, then we want to make sure this is a load from the
      // pool entry (and the pool entry should be nullptr so it will crash).
      if (data.isValidPoolHint()) {
        Assembler::as_dtr_patch(
            IsLoad, 32, Offset, pc,
            DTRAddr(pc, DtrOffImm(offset + 4 * data.getIndex() - 8)),
            data.getCond(), instAddr);
      }
      break;
    case PoolHintData::PoolVDTR: {
      // VFP load: the pc-relative reach of a VFP load is limited, hence the
      // range assertion.
      VFPRegister dest = data.getVFPReg();
      int32_t imm = offset + (data.getIndex() * 4) - 8;
      MOZ_ASSERT(-1024 < imm && imm < 1024);
      Assembler::as_vdtr_patch(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)),
                               data.getCond(), instAddr);
      break;
    }
  }
}
   1693 
   1694 // Atomic instruction stuff:
   1695 
   1696 BufferOffset Assembler::as_ldrexd(Register rt, Register rt2, Register rn,
   1697                                  Condition c) {
   1698  MOZ_ASSERT(!(rt.code() & 1) && rt2.code() == rt.code() + 1);
   1699  MOZ_ASSERT(rt.code() != 14 && rn.code() != 15);
   1700  return writeInst(0x01b00f9f | (int)c | RT(rt) | RN(rn));
   1701 }
   1702 
   1703 BufferOffset Assembler::as_ldrex(Register rt, Register rn, Condition c) {
   1704  MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
   1705  return writeInst(0x01900f9f | (int)c | RT(rt) | RN(rn));
   1706 }
   1707 
   1708 BufferOffset Assembler::as_ldrexh(Register rt, Register rn, Condition c) {
   1709  MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
   1710  return writeInst(0x01f00f9f | (int)c | RT(rt) | RN(rn));
   1711 }
   1712 
   1713 BufferOffset Assembler::as_ldrexb(Register rt, Register rn, Condition c) {
   1714  MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
   1715  return writeInst(0x01d00f9f | (int)c | RT(rt) | RN(rn));
   1716 }
   1717 
   1718 BufferOffset Assembler::as_strexd(Register rd, Register rt, Register rt2,
   1719                                  Register rn, Condition c) {
   1720  MOZ_ASSERT(!(rt.code() & 1) && rt2.code() == rt.code() + 1);
   1721  MOZ_ASSERT(rt.code() != 14 && rn.code() != 15 && rd.code() != 15);
   1722  MOZ_ASSERT(rd != rn && rd != rt && rd != rt2);
   1723  return writeInst(0x01a00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
   1724 }
   1725 
   1726 BufferOffset Assembler::as_strex(Register rd, Register rt, Register rn,
   1727                                 Condition c) {
   1728  MOZ_ASSERT(rd != rn && rd != rt);  // True restriction on Cortex-A7 (RPi2)
   1729  return writeInst(0x01800f90 | (int)c | RD(rd) | RN(rn) | rt.code());
   1730 }
   1731 
   1732 BufferOffset Assembler::as_strexh(Register rd, Register rt, Register rn,
   1733                                  Condition c) {
   1734  MOZ_ASSERT(rd != rn && rd != rt);  // True restriction on Cortex-A7 (RPi2)
   1735  return writeInst(0x01e00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
   1736 }
   1737 
   1738 BufferOffset Assembler::as_strexb(Register rd, Register rt, Register rn,
   1739                                  Condition c) {
   1740  MOZ_ASSERT(rd != rn && rd != rt);  // True restriction on Cortex-A7 (RPi2)
   1741  return writeInst(0x01c00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
   1742 }
   1743 
   1744 BufferOffset Assembler::as_clrex() { return writeInst(0xf57ff01f); }
   1745 
   1746 // Memory barrier stuff:
   1747 
// DMB: data memory barrier with the given scope/access |option|.
BufferOffset Assembler::as_dmb(BarrierOption option) {
  return writeInst(0xf57ff050U | (int)option);
}
// DSB: data synchronization barrier.
BufferOffset Assembler::as_dsb(BarrierOption option) {
  return writeInst(0xf57ff040U | (int)option);
}
// ISB: instruction synchronization barrier, full-system scope.
BufferOffset Assembler::as_isb() {
  return writeInst(0xf57ff06fU);  // option == SY
}
// Legacy CP15-based barrier encodings, for pre-ARMv7 style barriers.
BufferOffset Assembler::as_dsb_trap() {
  // DSB is "mcr 15, 0, r0, c7, c10, 4".
  // See eg https://bugs.kde.org/show_bug.cgi?id=228060.
  // ARMv7 manual, "VMSA CP15 c7 register summary".
  // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
  // ARMv8 manual E2.7.3 and G3.18.16.
  return writeInst(0xee070f9a);
}
BufferOffset Assembler::as_dmb_trap() {
  // DMB is "mcr 15, 0, r0, c7, c10, 5".
  // ARMv7 manual, "VMSA CP15 c7 register summary".
  // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
  // ARMv8 manual E2.7.3 and G3.18.16.
  return writeInst(0xee070fba);
}
BufferOffset Assembler::as_isb_trap() {
  // ISB is "mcr 15, 0, r0, c7, c5, 4".
  // ARMv7 manual, "VMSA CP15 c7 register summary".
  // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
  // ARMv8 manual E2.7.3 and G3.18.16.
  return writeInst(0xee070f94);
}
   1779 
   1780 BufferOffset Assembler::as_csdb() {
   1781  // NOP (see as_nop) on architectures where this instruction is not defined.
   1782  //
   1783  // https://developer.arm.com/-/media/developer/pdf/Cache_Speculation_Side-channels_22Feb18.pdf
   1784  // CSDB A32: 1110_0011_0010_0000_1111_0000_0001_0100
   1785  return writeInst(0xe320f000 | 0x14);
   1786 }
   1787 
   1788 // Move Special Register and Hints:
   1789 
BufferOffset Assembler::as_yield() {
  // YIELD hint instruction, e.g. for spin-wait loops.
  //
  // YIELD A32: 1110_0011_0010_0000_1111_0000_0000_0001
  return writeInst(0xe320f001);
}
   1796 
   1797 // Control flow stuff:
   1798 
   1799 // bx can *only* branch to a register, never to an immediate.
   1800 BufferOffset Assembler::as_bx(Register r, Condition c) {
   1801  BufferOffset ret = writeInst(((int)c) | OpBx | r.code());
   1802  return ret;
   1803 }
   1804 
// Write the guard branch at |branch| that jumps over the constant pool to
// |afterPool|, so the pool data is never executed.
void Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest,
                               BufferOffset afterPool) {
  BOffImm off = afterPool.diffB<BOffImm>(branch);
  if (off.isInvalid()) {
    MOZ_CRASH("BOffImm invalid");
  }
  *dest = InstBImm(off, Always);
}
   1813 
   1814 // Branch can branch to an immediate *or* to a register.
   1815 // Branches to immediates are pc relative, branches to registers are absolute.
// Emit a conditional branch to the pc-relative offset |off|.  |documentation|
// is only used for disassembly spew.
BufferOffset Assembler::as_b(BOffImm off, Condition c, Label* documentation) {
  return writeBranchInst(((int)c) | OpB | off.encode(),
                         refLabel(documentation));
}
   1820 
// Emit a branch to label |l|.  If |l| is unbound, the branch is threaded onto
// the label's use chain (via its immediate field) and patched later by bind().
BufferOffset Assembler::as_b(Label* l, Condition c) {
  if (l->bound()) {
    // Note only one instruction is emitted here, the NOP is overwritten.
    BufferOffset ret = allocBranchInst();
    if (oom()) {
      return BufferOffset();
    }

    BOffImm offset = BufferOffset(l).diffB<BOffImm>(ret);
    MOZ_RELEASE_ASSERT(!offset.isInvalid(),
                       "Buffer size limit should prevent this");
    as_b(offset, c, ret);
#ifdef JS_DISASM_ARM
    spewBranch(m_buffer.getInstOrNull(ret), refLabel(l));
#endif
    return ret;
  }

  if (oom()) {
    return BufferOffset();
  }

  BufferOffset ret;
  if (l->used()) {
    // Link onto the existing use chain: the immediate temporarily stores the
    // offset of the previous use.
    int32_t old = l->offset();
    MOZ_RELEASE_ASSERT(BOffImm::IsInRange(old),
                       "Buffer size limit should prevent this");
    ret = as_b(BOffImm(old), c, l);
  } else {
    // First use: an invalid offset marks the end of the chain.
    BOffImm inv;
    ret = as_b(inv, c, l);
  }

  if (oom()) {
    return BufferOffset();
  }

  l->use(ret.getOffset());
  return ret;
}
   1861 
// Overwrite the instruction at |inst| with a branch to |off|.
BufferOffset Assembler::as_b(BOffImm off, Condition c, BufferOffset inst) {
  // JS_DISASM_ARM NOTE: Can't disassemble here, because numerous callers use
  // this to patch up old code.  Must disassemble in caller where it makes
  // sense.  Not many callers.
  *editSrc(inst) = InstBImm(off, c);
  return inst;
}
   1869 
   1870 // blx can go to either an immediate or a register.
// When blx'ing to a register, we change processor state depending on the low
// bit of the register; when blx'ing to an immediate, we *always* change
// processor state.
   1874 
// Branch with link and exchange to the address held in |r|.
BufferOffset Assembler::as_blx(Register r, Condition c) {
  return writeInst(((int)c) | OpBlx | r.code());
}
   1878 
// bl can only branch to a pc-relative immediate offset.
   1880 // It cannot change the processor state.
// Emit a branch-with-link to the pc-relative offset |off|; |documentation|
// is only used for disassembly spew.
BufferOffset Assembler::as_bl(BOffImm off, Condition c, Label* documentation) {
  return writeBranchInst(((int)c) | OpBl | off.encode(),
                         refLabel(documentation));
}
   1885 
// Emit a branch-with-link to label |l|; same label-chaining protocol as
// as_b(Label*, Condition).
BufferOffset Assembler::as_bl(Label* l, Condition c) {
  if (l->bound()) {
    // Note only one instruction is emitted here, the NOP is overwritten.
    BufferOffset ret = allocBranchInst();
    if (oom()) {
      return BufferOffset();
    }

    BOffImm offset = BufferOffset(l).diffB<BOffImm>(ret);
    MOZ_RELEASE_ASSERT(!offset.isInvalid(),
                       "Buffer size limit should prevent this");

    as_bl(offset, c, ret);
#ifdef JS_DISASM_ARM
    spewBranch(m_buffer.getInstOrNull(ret), refLabel(l));
#endif
    return ret;
  }

  if (oom()) {
    return BufferOffset();
  }

  BufferOffset ret;
  // See if the list was empty.
  if (l->used()) {
    // Link onto the existing use chain; the immediate temporarily stores the
    // offset of the previous use.
    int32_t old = l->offset();
    MOZ_RELEASE_ASSERT(BOffImm::IsInRange(old),
                       "Buffer size limit should prevent this");
    ret = as_bl(BOffImm(old), c, l);
  } else {
    // First use: an invalid offset marks the end of the chain.
    BOffImm inv;
    ret = as_bl(inv, c, l);
  }

  if (oom()) {
    return BufferOffset();
  }

  l->use(ret.getOffset());
  return ret;
}
   1928 
// Overwrite the instruction at |inst| with a branch-with-link to |off|.
BufferOffset Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst) {
  *editSrc(inst) = InstBLImm(off, c);
  return inst;
}
   1933 
// MRS: read the status register into |r|.
BufferOffset Assembler::as_mrs(Register r, Condition c) {
  return writeInst(0x010f0000 | int(c) | RD(r));
}

// MSR: write |r| to the status register fields selected by the mask.
BufferOffset Assembler::as_msr(Register r, Condition c) {
  // Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which
  // are the two high bits of the 'c' in this constant.
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return writeInst(0x012cf000 | int(c) | r.code());
}
   1944 
   1945 // VFP instructions!
   1946 enum vfp_tags { VfpTag = 0x0C000A00, VfpArith = 0x02000000 };
   1947 
   1948 BufferOffset Assembler::writeVFPInst(vfp_size sz, uint32_t blob) {
   1949  MOZ_ASSERT((sz & blob) == 0);
   1950  MOZ_ASSERT((VfpTag & blob) == 0);
   1951  return writeInst(VfpTag | std::underlying_type_t<vfp_size>(sz) | blob);
   1952 }
   1953 
   1954 /* static */
   1955 void Assembler::WriteVFPInstStatic(vfp_size sz, uint32_t blob, uint32_t* dest) {
   1956  MOZ_ASSERT((sz & blob) == 0);
   1957  MOZ_ASSERT((VfpTag & blob) == 0);
   1958  WriteInstStatic(VfpTag | std::underlying_type_t<vfp_size>(sz) | blob, dest);
   1959 }
   1960 
   1961 // Unityped variants: all registers hold the same (ieee754 single/double)
   1962 // notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
// Shared emitter for the VFP arithmetic class: |op| selects the operation,
// and all present operands must agree on single vs double precision.
BufferOffset Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn,
                                     VFPRegister vm, VFPOp op, Condition c) {
  // Make sure we believe that all of our operands are the same kind.
  MOZ_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
  MOZ_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | VfpArith | c);
}
   1971 
// vadd: vd = vn + vm.
BufferOffset Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvAdd, c);
}

// vdiv: vd = vn / vm.
BufferOffset Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvDiv, c);
}

// vmul: vd = vn * vm.
BufferOffset Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvMul, c);
}

BufferOffset Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                 Condition c) {
  // NOTE(review): this passes OpvMul, i.e. the exact encoding emitted by
  // as_vmul above, so it assembles a vmul rather than a negated multiply.
  // Confirm whether a distinct vnmul opcode was intended before relying on
  // this entry point.
  return as_vfp_float(vd, vn, vm, OpvMul, c);
}

BufferOffset Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                 Condition c) {
  // Not yet implemented.
  MOZ_CRASH("Feature NYI");
}

BufferOffset Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                 Condition c) {
  // Not yet implemented.
  MOZ_CRASH("Feature NYI");
}

// vneg: vd = -vm.
BufferOffset Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvNeg, c);
}

// vsqrt: vd = sqrt(vm).
BufferOffset Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvSqrt, c);
}

// vabs: vd = |vm|.
BufferOffset Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvAbs, c);
}

// vsub: vd = vn - vm.
BufferOffset Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvSub, c);
}

// vcmp: compare vd with vm, setting the VFP status flags.
BufferOffset Assembler::as_vcmp(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvCmp, c);
}

// vcmpz: compare vd against zero.
BufferOffset Assembler::as_vcmpz(VFPRegister vd, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, OpvCmpz, c);
}

// Specifically, a move between two same sized-registers.
BufferOffset Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c);
}
   2031 
   2032 // Transfer between Core and VFP.
   2033 
   2034 // Unlike the next function, moving between the core registers and vfp registers
   2035 // can't be *that* properly typed. Namely, since I don't want to munge the type
   2036 // VFPRegister to also include core registers. Thus, the core and vfp registers
   2037 // are passed in based on their type, and src/dest is determined by the
   2038 // float2core.
   2039 
// Transfer 32 or 64 bits between core register(s) and a VFP register.
// |f2c| gives the direction; |idx| selects which half of a double register
// participates in a single-word transfer.
BufferOffset Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm,
                                 FloatToCore_ f2c, Condition c, int idx) {
  vfp_size sz = IsSingle;
  if (vm.isDouble()) {
    // Technically, this can be done with a vmov à la ARM ARM under vmov
    // however, that requires at least an extra bit saying if the operation
    // should be performed on the lower or upper half of the double. Moving
    // a single to/from 2N/2N+1 isn't equivalent, since there are 32 single
    // registers, and 32 double registers so there is no way to encode the
    // last 16 double registers.
    sz = IsDouble;
    MOZ_ASSERT(idx == 0 || idx == 1);
    // If we are transferring a single half of the double then it must be
    // moving a VFP reg to a core reg.
    MOZ_ASSERT_IF(vt2 == InvalidReg, f2c == FloatToCore);
    idx = idx << 21;
  } else {
    MOZ_ASSERT(idx == 0);
  }

  // Single-word transfer (one core register involved).
  if (vt2 == InvalidReg) {
    return writeVFPInst(sz, WordTransfer |
                                std::underlying_type_t<FloatToCore_>(f2c) |
                                std::underlying_type_t<Condition>(c) | RT(vt1) |
                                maybeRN(vt2) | VN(vm) | idx);
  }

  // We are doing a 64 bit transfer.
  return writeVFPInst(sz, DoubleTransfer |
                              std::underlying_type_t<FloatToCore_>(f2c) |
                              std::underlying_type_t<Condition>(c) | RT(vt1) |
                              maybeRN(vt2) | VM(vm) | idx);
}
   2073 
// Encoding bits for vcvt, below: destination kind, rounding behavior, and
// signedness of the integer operand.
enum vcvt_destFloatness { VcvtToInteger = 1 << 18, VcvtToFloat = 0 << 18 };
enum vcvt_toZero {
  VcvtToZero =
      1 << 7,  // Round towards zero (truncate), ignoring the FPSCR mode.
  VcvtToFPSCR = 0 << 7  // Use whatever rounding mode the fpscr specifies.
};
enum vcvt_Signedness {
  VcvtToSigned = 1 << 16,
  VcvtToUnsigned = 0 << 16,
  VcvtFromSigned = 1 << 7,
  VcvtFromUnsigned = 0 << 7
};
   2086 
   2087 // Our encoding actually allows just the src and the dest (and their types) to
   2088 // uniquely specify the encoding that we are going to use.
// Our encoding actually allows just the src and the dest (and their types) to
// uniquely specify the encoding that we are going to use.
BufferOffset Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
                                Condition c) {
  // Unlike other cases, the source and dest types cannot be the same.
  MOZ_ASSERT(!vd.equiv(vm));
  vfp_size sz = IsDouble;
  if (vd.isFloat() && vm.isFloat()) {
    // Doing a float -> float conversion (single <-> double).
    if (vm.isSingle()) {
      sz = IsSingle;
    }
    return writeVFPInst(sz, c | 0x02B700C0 | VM(vm) | VD(vd));
  }

  // At least one of the registers should be a float.
  vcvt_destFloatness destFloat;
  vcvt_Signedness opSign;
  vcvt_toZero doToZero = VcvtToFPSCR;
  MOZ_ASSERT(vd.isFloat() || vm.isFloat());
  if (vd.isSingle() || vm.isSingle()) {
    sz = IsSingle;
  }

  if (vd.isFloat()) {
    // int -> float: pick the signedness of the integer source.
    destFloat = VcvtToFloat;
    opSign = (vm.isSInt()) ? VcvtFromSigned : VcvtFromUnsigned;
  } else {
    // float -> int: truncate unless the caller asked for FPSCR rounding.
    destFloat = VcvtToInteger;
    opSign = (vd.isSInt()) ? VcvtToSigned : VcvtToUnsigned;
    doToZero = useFPSCR ? VcvtToFPSCR : VcvtToZero;
  }
  return writeVFPInst(
      sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
}
   2122 
// vcvt between |vd| and a fixed-point value with |fixedPoint| fractional
// bits; |toFixed| selects the direction.
BufferOffset Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned,
                                     uint32_t fixedPoint, bool toFixed,
                                     Condition c) {
  MOZ_ASSERT(vd.isFloat());
  // sx == 1 selects a 32-bit fixed-point operand (vs 16-bit).
  uint32_t sx = 0x1;
  vfp_size sf = vd.isDouble() ? IsDouble : IsSingle;
  // The instruction encodes (size - fractional bits), split across the
  // imm4 field and a separate low bit at position 5.
  int32_t imm5 = fixedPoint;
  imm5 = (sx ? 32 : 16) - imm5;
  MOZ_ASSERT(imm5 >= 0);
  imm5 = imm5 >> 1 | (imm5 & 1) << 5;
  return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 |
                              (!isSigned) << 16 | imm5 | c);
}
   2136 
// vcvtb single -> half precision (result in the bottom half of |vd|).
BufferOffset Assembler::as_vcvtb_s2h(VFPRegister vd, VFPRegister vm,
                                     Condition c) {
  MOZ_ASSERT(ARMFlags::HasFPHalfPrecision());
  MOZ_ASSERT(vd.isSingle());
  MOZ_ASSERT(vm.isSingle());

  return writeVFPInst(IsSingle, c | 0x02B30040 | VM(vm) | VD(vd));
}

// vcvtb half (bottom half of |vm|) -> single precision.
BufferOffset Assembler::as_vcvtb_h2s(VFPRegister vd, VFPRegister vm,
                                     Condition c) {
  MOZ_ASSERT(ARMFlags::HasFPHalfPrecision());
  MOZ_ASSERT(vd.isSingle());
  MOZ_ASSERT(vm.isSingle());

  return writeVFPInst(IsSingle, c | 0x02B20040 | VM(vm) | VD(vd));
}
   2154 
   2155 // Transfer between VFP and memory.
// Common vldr/vstr encoding, shared by the buffered and patching emitters.
static uint32_t EncodeVdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
                           Assembler::Condition c) {
  return ls | 0x01000000 | addr.encode() | VD(vd) | c;
}
   2160 
// Single-register VFP load/store (vldr/vstr).
BufferOffset Assembler::as_vdtr(
    LoadStore ls, VFPRegister vd, VFPAddr addr,
    Condition c /* vfp doesn't have a wb option */) {
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  return writeVFPInst(sz, EncodeVdtr(ls, vd, addr, c));
}
   2167 
/* static */
// Patching variant of as_vdtr: writes the vldr/vstr directly to |dest|.
void Assembler::as_vdtr_patch(LoadStore ls, VFPRegister vd, VFPAddr addr,
                              Condition c, uint32_t* dest) {
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  WriteVFPInstStatic(sz, EncodeVdtr(ls, vd, addr, c), dest);
}
   2174 
   2175 // VFP's ldm/stm work differently from the standard arm ones. You can only
   2176 // transfer a range.
   2177 
// Emit a VFP multiple load/store of |length| consecutive registers starting
// at |vd|, based at |rn|.
BufferOffset Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd,
                                int length,
                                /* also has update conditions */ Condition c) {
  MOZ_ASSERT(length <= 16 && length >= 0);
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;

  // Doubles occupy two transfer words apiece.
  if (vd.isDouble()) {
    length *= 2;
  }

  // NOTE(review): the |st| and |c| parameters are not read here; the
  // direction, addressing mode, writeback and condition come from the dtm*
  // member state (presumably set up by a startDataTransferM-style call) --
  // confirm callers establish that state first.
  return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) | length | dtmMode |
                              dtmUpdate | dtmCond);
}
   2191 
// NEON load that tolerates unaligned addresses, unlike vldr.
BufferOffset Assembler::as_vldr_unaligned(VFPRegister vd, Register rn) {
  MOZ_ASSERT(ARMFlags::HasNEON());
  if (vd.isDouble()) {
    // vld1 (multiple single elements) with align=0, size=3, numregs=1
    return writeInst(0xF42007CF | RN(rn) | VD(vd));
  }
  // vld1 (single element to single lane) with index=0, size=2
  MOZ_ASSERT(vd.isFloat());
  // Only even-numbered singles can be addressed here, since the encoding
  // aliases the single onto the containing double register.
  MOZ_ASSERT((vd.code() & 1) == 0);
  return writeInst(0xF4A0080F | RN(rn) | VD(vd.asDouble()));
}
   2203 
// NEON store that tolerates unaligned addresses, unlike vstr.
BufferOffset Assembler::as_vstr_unaligned(VFPRegister vd, Register rn) {
  MOZ_ASSERT(ARMFlags::HasNEON());
  if (vd.isDouble()) {
    // vst1 (multiple single elements) with align=0, size=3, numregs=1
    return writeInst(0xF40007CF | RN(rn) | VD(vd));
  }
  // vst1 (single element from one lane) with index=0, size=2
  MOZ_ASSERT(vd.isFloat());
  // Only even-numbered singles can be addressed here, since the encoding
  // aliases the single onto the containing double register.
  MOZ_ASSERT((vd.code() & 1) == 0);
  return writeInst(0xF480080F | RN(rn) | VD(vd.asDouble()));
}
   2215 
   2216 BufferOffset Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c) {
   2217  MOZ_ASSERT(imm.isValid());
   2218  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
   2219  return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000);
   2220 }
   2221 
// vmrs: read the VFP status/control register into |r|.
BufferOffset Assembler::as_vmrs(Register r, Condition c) {
  return writeInst(c | 0x0ef10a10 | RT(r));
}

// vmsr: write |r| to the VFP status/control register.
BufferOffset Assembler::as_vmsr(Register r, Condition c) {
  return writeInst(c | 0x0ee10a10 | RT(r));
}
   2229 
// Walk one step along a label's use chain: the branch at |b| stores the
// offset of the next use in its immediate field.  Returns false when |b|
// is the end of the chain (invalid-offset marker).
bool Assembler::nextLink(BufferOffset b, BufferOffset* next) {
  Instruction branch = *editSrc(b);
  MOZ_ASSERT(branch.is<InstBranchImm>());

  BOffImm destOff;
  branch.as<InstBranchImm>()->extractImm(&destOff);
  if (destOff.isInvalid()) {
    return false;
  }

  // Propagate the next link back to the caller, by constructing a new
  // BufferOffset into the space they provided.
  new (next) BufferOffset(destOff.decode());
  return true;
}
   2245 
// Bind |label| to |boff| (or, when |boff| is unassigned, to the location of
// the next instruction), patching every branch on the label's use chain.
void Assembler::bind(Label* label, BufferOffset boff) {
#ifdef JS_DISASM_ARM
  spew_.spewBind(label);
#endif
  if (oom()) {
    // Ensure we always bind the label. This matches what we do on
    // x86/x64 and silences the assert in ~Label.
    label->bind(0);
    return;
  }

  // If our caller didn't give us an explicit target to bind to then we
  // want to bind to the location of the next instruction.
  BufferOffset dest = boff.assigned() ? boff : nextOffset();
  if (label->used()) {
    bool more;
    BufferOffset b(label);
    // Retarget each branch on the use chain to |dest|, preserving each
    // branch's condition and kind (b vs bl).
    do {
      BufferOffset next;
      more = nextLink(b, &next);
      Instruction branch = *editSrc(b);
      Condition c = branch.extractCond();
      BOffImm offset = dest.diffB<BOffImm>(b);
      MOZ_RELEASE_ASSERT(!offset.isInvalid(),
                         "Buffer size limit should prevent this");
      if (branch.is<InstBImm>()) {
        as_b(offset, c, b);
      } else if (branch.is<InstBLImm>()) {
        as_bl(offset, c, b);
      } else {
        MOZ_CRASH("crazy fixup!");
      }
      b = next;
    } while (more);
  }
  label->bind(dest.getOffset());
  MOZ_ASSERT(!oom());
}
   2284 
// Redirect every use of |label| to |target|, then reset |label|.
void Assembler::retarget(Label* label, Label* target) {
#ifdef JS_DISASM_ARM
  spew_.spewRetarget(label, target);
#endif
  if (label->used() && !oom()) {
    if (target->bound()) {
      // Target already has an address: just bind label's uses to it.
      bind(label, BufferOffset(target));
    } else if (target->used()) {
      // The target is not bound but used. Prepend label's branch list
      // onto target's.
      BufferOffset labelBranchOffset(label);
      BufferOffset next;

      // Find the head of the use chain for label.
      while (nextLink(labelBranchOffset, &next)) {
        labelBranchOffset = next;
      }

      // Then patch the head of label's use chain to the tail of target's
      // use chain, prepending the entire use chain of target.
      Instruction branch = *editSrc(labelBranchOffset);
      Condition c = branch.extractCond();
      int32_t prev = target->offset();
      target->use(label->offset());
      if (branch.is<InstBImm>()) {
        as_b(BOffImm(prev), c, labelBranchOffset);
      } else if (branch.is<InstBLImm>()) {
        as_bl(BOffImm(prev), c, labelBranchOffset);
      } else {
        MOZ_CRASH("crazy fixup!");
      }
    } else {
      // The target is unbound and unused. We can just take the head of
      // the list hanging off of label, and dump that into target.
      target->use(label->offset());
    }
  }
  label->reset();
}
   2324 
// Sentinel compared against the per-breakpoint counter below; settable from a
// debugger to trap when a specific breakpoint number is generated.
static int stopBKPT = -1;
void Assembler::as_bkpt() {
  // This is a count of how many times a breakpoint instruction has been
  // generated. It is embedded into the instruction for debugging
  // purposes. Gdb will print "bkpt xxx" when you attempt to disassemble a
  // breakpoint with the number xxx embedded into it. If this breakpoint is
  // being hit, then you can run (in gdb):
  //  >b dbg_break
  //  >b main
  //  >commands
  //  >set stopBKPT = xxx
  //  >c
  //  >end
  // which will set a breakpoint on the function dbg_break above, set a
  // scripted breakpoint on main that will set the (otherwise unmodified)
  // value to the number of the breakpoint, so dbg_break will actually be
  // called and finally, when you run the executable, execution will halt when
  // that breakpoint is generated.
  static int hit = 0;
  if (stopBKPT == hit) {
    dbg_break();
  }
  // BKPT #hit: the 16-bit immediate is split across bits [19:8] and [3:0].
  writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0) << 4));
  hit++;
}
   2350 
// Emit an instruction guaranteed to fault, for trap sites.
BufferOffset Assembler::as_illegal_trap() {
  // Encoding of the permanently-undefined 'udf' instruction, with the imm16
  // set to 0.
  return writeInst(0xe7f000f0);
}
   2356 
// Thin wrappers delegating constant-pool and nop-window management to the
// underlying assembler buffer.
void Assembler::flushBuffer() { m_buffer.flushPool(); }

void Assembler::enterNoPool(size_t maxInst) { m_buffer.enterNoPool(maxInst); }

void Assembler::leaveNoPool() { m_buffer.leaveNoPool(); }

void Assembler::enterNoNops() { m_buffer.enterNoNops(); }

void Assembler::leaveNoNops() { m_buffer.leaveNoNops(); }
   2366 
// Marker word placed at the start of each constant pool.  Its top 16 bits are
// all ones, which is the signature IsTHIS() checks for.
struct PoolHeader : Instruction {
  struct Header {
    // The size should take into account the pool header.
    // The size is in units of Instruction (4 bytes), not byte.
    uint32_t size : 15;
    // NOTE(review): presumably records whether the pool was dumped at a
    // natural point rather than being forced -- confirm against the pool
    // allocator.
    uint32_t isNatural : 1;
    // Signature bits; always 0xffff.
    uint32_t ONES : 16;

    Header(int size_, bool isNatural_)
        : size(size_), isNatural(isNatural_), ONES(0xffff) {}

    // Reconstruct a header by copying the raw bits of an instruction word.
    explicit Header(const Instruction* i) {
      static_assert(sizeof(Header) == sizeof(uint32_t));
      memcpy(this, i, sizeof(Header));
      MOZ_ASSERT(ONES == 0xffff);
    }

    // The header's bit pattern as a plain word.
    uint32_t raw() const {
      static_assert(sizeof(Header) == sizeof(uint32_t));
      uint32_t dest;
      memcpy(&dest, this, sizeof(Header));
      return dest;
    }
  };

  PoolHeader(int size_, bool isNatural_)
      : Instruction(Header(size_, isNatural_).raw(), true) {}

  uint32_t size() const {
    Header tmp(this);
    return tmp.size;
  }
  uint32_t isNatural() const {
    Header tmp(this);
    return tmp.isNatural;
  }

  // A pool header is any word whose high 16 bits are all set.
  static bool IsTHIS(const Instruction& i) {
    return (*i.raw() & 0xffff0000) == 0xffff0000;
  }
  static const PoolHeader* AsTHIS(const Instruction& i) {
    if (!IsTHIS(i)) {
      return nullptr;
    }
    return static_cast<const PoolHeader*>(&i);
  }
};
   2414 
// Write the PoolHeader marker word at |start|, recording the total pool size
// (header included) in words.
void Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural) {
  static_assert(sizeof(PoolHeader) == 4,
                "PoolHandler must have the correct size.");
  uint8_t* pool = start + 4;
  // Go through the usual rigmarole to get the size of the pool.
  pool += p->getPoolSize();
  uint32_t size = pool - start;
  MOZ_ASSERT((size & 3) == 0);
  // Convert bytes to words; the size must fit the header's 15-bit field.
  size = size >> 2;
  MOZ_ASSERT(size < (1 << 15));
  PoolHeader header(size, isNatural);
  *(PoolHeader*)start = header;
}
   2428 
// The size of an arbitrary 32-bit call in the instruction stream. On ARM this
// sequence is |pc = ldr pc - 4; imm32| given that we never reach the imm32.
uint32_t Assembler::PatchWrite_NearCallSize() { return sizeof(uint32_t); }
   2433 void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
   2434                                    CodeLocationLabel toCall) {
   2435  Instruction* inst = (Instruction*)start.raw();
   2436  // Overwrite whatever instruction used to be here with a call. Since the
   2437  // destination is in the same function, it will be within range of the
   2438  // 24 << 2 byte bl instruction.
   2439  uint8_t* dest = toCall.raw();
   2440  new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst), Always);
   2441 }
   2442 
   2443 void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
   2444                                        PatchedImmPtr newValue,
   2445                                        PatchedImmPtr expectedValue) {
   2446  Instruction* ptr = reinterpret_cast<Instruction*>(label.raw());
   2447 
   2448  Register dest;
   2449  Assembler::RelocStyle rs;
   2450 
   2451  {
   2452    InstructionIterator iter(ptr);
   2453    DebugOnly<const uint32_t*> val = GetPtr32Target(iter, &dest, &rs);
   2454    MOZ_ASSERT(uint32_t((const uint32_t*)val) == uint32_t(expectedValue.value));
   2455  }
   2456 
   2457  // Patch over actual instructions.
   2458  {
   2459    InstructionIterator iter(ptr);
   2460    MacroAssembler::ma_mov_patch(Imm32(int32_t(newValue.value)), dest, Always,
   2461                                 rs, iter);
   2462  }
   2463 }
   2464 
   2465 void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
   2466                                        ImmPtr newValue, ImmPtr expectedValue) {
   2467  PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
   2468                          PatchedImmPtr(expectedValue.value));
   2469 }
   2470 
   2471 // This just stomps over memory with 32 bits of raw data. Its purpose is to
   2472 // overwrite the call of JITed code with 32 bits worth of an offset. This will
   2473 // is only meant to function on code that has been invalidated, so it should be
   2474 // totally safe. Since that instruction will never be executed again, a ICache
   2475 // flush should not be necessary
   2476 void Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
   2477  // Raw is going to be the return address.
   2478  uint32_t* raw = (uint32_t*)label.raw();
   2479  // Overwrite the 4 bytes before the return address, which will end up being
   2480  // the call instruction.
   2481  *(raw - 1) = imm.value;
   2482 }
   2483 
   2484 uint8_t* Assembler::NextInstruction(uint8_t* inst_, uint32_t* count) {
   2485  if (count != nullptr) {
   2486    *count += sizeof(Instruction);
   2487  }
   2488 
   2489  InstructionIterator iter(reinterpret_cast<Instruction*>(inst_));
   2490  return reinterpret_cast<uint8_t*>(iter.next());
   2491 }
   2492 
   2493 static bool InstIsGuard(Instruction* inst, const PoolHeader** ph) {
   2494  Assembler::Condition c = inst->extractCond();
   2495  if (c != Assembler::Always) {
   2496    return false;
   2497  }
   2498  if (!(inst->is<InstBXReg>() || inst->is<InstBImm>())) {
   2499    return false;
   2500  }
   2501  // See if the next instruction is a pool header.
   2502  *ph = (inst + 1)->as<const PoolHeader>();
   2503  return *ph != nullptr;
   2504 }
   2505 
   2506 static bool InstIsGuard(BufferInstructionIterator& iter,
   2507                        const PoolHeader** ph) {
   2508  Instruction* inst = iter.cur();
   2509  Assembler::Condition c = inst->extractCond();
   2510  if (c != Assembler::Always) {
   2511    return false;
   2512  }
   2513  if (!(inst->is<InstBXReg>() || inst->is<InstBImm>())) {
   2514    return false;
   2515  }
   2516  // See if the next instruction is a pool header.
   2517  *ph = iter.peek()->as<const PoolHeader>();
   2518  return *ph != nullptr;
   2519 }
   2520 
   2521 template <class T>
   2522 static bool InstIsBNop(const T& iter) {
   2523  // In some special situations, it is necessary to insert a NOP into the
   2524  // instruction stream that nobody knows about, since nobody should know
   2525  // about it, make sure it gets skipped when Instruction::next() is called.
   2526  // this generates a very specific nop, namely a branch to the next
   2527  // instruction.
   2528  const Instruction* cur = iter.cur();
   2529  Assembler::Condition c = cur->extractCond();
   2530  if (c != Assembler::Always) {
   2531    return false;
   2532  }
   2533  if (!cur->is<InstBImm>()) {
   2534    return false;
   2535  }
   2536  InstBImm* b = cur->as<InstBImm>();
   2537  BOffImm offset;
   2538  b->extractImm(&offset);
   2539  return offset.decode() == 4;
   2540 }
   2541 
   2542 Instruction* InstructionIterator::maybeSkipAutomaticInstructions() {
   2543  // If the current instruction was automatically-inserted, skip past it.
   2544  const PoolHeader* ph;
   2545 
   2546  // Loop until an intentionally-placed instruction is found.
   2547  while (true) {
   2548    if (InstIsGuard(cur(), &ph)) {
   2549      // Don't skip a natural guard.
   2550      if (ph->isNatural()) {
   2551        return cur();
   2552      }
   2553      advanceRaw(1 + ph->size());
   2554    } else if (InstIsBNop<InstructionIterator>(*this)) {
   2555      advanceRaw(1);
   2556    } else {
   2557      return cur();
   2558    }
   2559  }
   2560 }
   2561 
   2562 Instruction* BufferInstructionIterator::maybeSkipAutomaticInstructions() {
   2563  const PoolHeader* ph;
   2564  // If this is a guard, and the next instruction is a header, always work
   2565  // around the pool. If it isn't a guard, then start looking ahead.
   2566  if (InstIsGuard(*this, &ph)) {
   2567    // Don't skip a natural guard.
   2568    if (ph->isNatural()) {
   2569      return cur();
   2570    }
   2571    advance(sizeof(Instruction) * ph->size());
   2572    return next();
   2573  }
   2574  if (InstIsBNop<BufferInstructionIterator>(*this)) {
   2575    return next();
   2576  }
   2577  return cur();
   2578 }
   2579 
   2580 // Cases to be handled:
   2581 // 1) no pools or branches in sight => return this+1
   2582 // 2) branch to next instruction => return this+2, because a nop needed to be
   2583 //    inserted into the stream.
   2584 // 3) this+1 is an artificial guard for a pool => return first instruction
   2585 //    after the pool
   2586 // 4) this+1 is a natural guard => return the branch
   2587 // 5) this is a branch, right before a pool => return first instruction after
   2588 //    the pool
   2589 // in assembly form:
   2590 // 1) add r0, r0, r0 <= this
   2591 //    add r1, r1, r1 <= returned value
   2592 //    add r2, r2, r2
   2593 //
   2594 // 2) add r0, r0, r0 <= this
   2595 //    b foo
   2596 //    foo:
   2597 //    add r2, r2, r2 <= returned value
   2598 //
   2599 // 3) add r0, r0, r0 <= this
   2600 //    b after_pool;
   2601 //    .word 0xffff0002  # bit 15 being 0 indicates that the branch was not
   2602 //                      # requested by the assembler
   2603 //    0xdeadbeef        # the 2 indicates that there is 1 pool entry, and the
   2604 //                      # pool header
   2605 //    add r4, r4, r4 <= returned value
   2606 // 4) add r0, r0, r0 <= this
   2607 //    b after_pool  <= returned value
   2608 //    .word 0xffff8002  # bit 15 being 1 indicates that the branch was
   2609 //                      # requested by the assembler
   2610 //    0xdeadbeef
   2611 //    add r4, r4, r4
   2612 // 5) b after_pool  <= this
   2613 //    .word 0xffff8002  # bit 15 has no bearing on the returned value
   2614 //    0xdeadbeef
   2615 //    add r4, r4, r4  <= returned value
   2616 
   2617 Instruction* InstructionIterator::next() {
   2618  const PoolHeader* ph;
   2619 
   2620  // If the current instruction is followed by a pool header,
   2621  // move past the current instruction and the pool.
   2622  if (InstIsGuard(cur(), &ph)) {
   2623    advanceRaw(1 + ph->size());
   2624    return maybeSkipAutomaticInstructions();
   2625  }
   2626 
   2627  // The next instruction is then known to not be a PoolHeader.
   2628  advanceRaw(1);
   2629  return maybeSkipAutomaticInstructions();
   2630 }
   2631 
// Patch a cmp instruction in place so that it becomes an unconditional
// branch; the low (offset) bits are left untouched so the operation can be
// reversed by ToggleToCmp.
void Assembler::ToggleToJmp(CodeLocationLabel inst_) {
  uint32_t* ptr = (uint32_t*)inst_.raw();

  DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
  MOZ_ASSERT(inst->is<InstCMP>());

  // Zero bits 20-27, then set 24-27 to be correct for a branch.
  // 20-23 will be part of the B's immediate, and should be 0.
  *ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20);
}
   2642 
   2643 void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
   2644  uint32_t* ptr = (uint32_t*)inst_.raw();
   2645 
   2646  DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
   2647  MOZ_ASSERT(inst->is<InstBImm>());
   2648 
   2649  // Ensure that this masking operation doesn't affect the offset of the
   2650  // branch instruction when it gets toggled back.
   2651  MOZ_ASSERT((*ptr & (0xf << 20)) == 0);
   2652 
   2653  // Also make sure that the CMP is valid. Part of having a valid CMP is that
   2654  // all of the bits describing the destination in most ALU instructions are
   2655  // all unset (looks like it is encoding r0).
   2656  MOZ_ASSERT(toRD(*inst) == r0);
   2657 
   2658  // Zero out bits 20-27, then set them to be correct for a compare.
   2659  *ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20);
   2660 }
   2661 
   2662 void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
   2663  InstructionIterator iter(reinterpret_cast<Instruction*>(inst_.raw()));
   2664  MOZ_ASSERT(iter.cur()->is<InstMovW>() || iter.cur()->is<InstLDR>());
   2665 
   2666  if (iter.cur()->is<InstMovW>()) {
   2667    // If it looks like the start of a movw/movt sequence, then make sure we
   2668    // have all of it (and advance the iterator past the full sequence).
   2669    iter.next();
   2670    MOZ_ASSERT(iter.cur()->is<InstMovT>());
   2671  }
   2672 
   2673  iter.next();
   2674  MOZ_ASSERT(iter.cur()->is<InstNOP>() || iter.cur()->is<InstBLXReg>());
   2675 
   2676  if (enabled == iter.cur()->is<InstBLXReg>()) {
   2677    // Nothing to do.
   2678    return;
   2679  }
   2680 
   2681  Instruction* inst = iter.cur();
   2682 
   2683  if (enabled) {
   2684    *inst = InstBLXReg(ScratchRegister, Always);
   2685  } else {
   2686    *inst = InstNOP();
   2687  }
   2688 }
   2689 
   2690 size_t Assembler::ToggledCallSize(uint8_t* code) {
   2691  InstructionIterator iter(reinterpret_cast<Instruction*>(code));
   2692  MOZ_ASSERT(iter.cur()->is<InstMovW>() || iter.cur()->is<InstLDR>());
   2693 
   2694  if (iter.cur()->is<InstMovW>()) {
   2695    // If it looks like the start of a movw/movt sequence, then make sure we
   2696    // have all of it (and advance the iterator past the full sequence).
   2697    iter.next();
   2698    MOZ_ASSERT(iter.cur()->is<InstMovT>());
   2699  }
   2700 
   2701  iter.next();
   2702  MOZ_ASSERT(iter.cur()->is<InstNOP>() || iter.cur()->is<InstBLXReg>());
   2703  return uintptr_t(iter.cur()) + 4 - uintptr_t(code);
   2704 }
   2705 
   2706 uint32_t Assembler::NopFill = 0;
   2707 
   2708 uint32_t Assembler::GetNopFill() {
   2709  static bool isSet = false;
   2710  if (!isSet) {
   2711    char* fillStr = getenv("ARM_ASM_NOP_FILL");
   2712    uint32_t fill;
   2713    if (fillStr && sscanf(fillStr, "%u", &fill) == 1) {
   2714      NopFill = fill;
   2715    }
   2716    if (NopFill > 8) {
   2717      MOZ_CRASH("Nop fill > 8 is not supported");
   2718    }
   2719    isSet = true;
   2720  }
   2721  return NopFill;
   2722 }
   2723 
   2724 uint32_t Assembler::AsmPoolMaxOffset = 1024;
   2725 
   2726 uint32_t Assembler::GetPoolMaxOffset() {
   2727  static bool isSet = false;
   2728  if (!isSet) {
   2729    char* poolMaxOffsetStr = getenv("ASM_POOL_MAX_OFFSET");
   2730    uint32_t poolMaxOffset;
   2731    if (poolMaxOffsetStr &&
   2732        sscanf(poolMaxOffsetStr, "%u", &poolMaxOffset) == 1) {
   2733      AsmPoolMaxOffset = poolMaxOffset;
   2734    }
   2735    isSet = true;
   2736  }
   2737  return AsmPoolMaxOffset;
   2738 }
   2739 
// RAII scope that reserves the assembler's current second scratch register.
SecondScratchRegisterScope::SecondScratchRegisterScope(MacroAssembler& masm)
    : AutoRegisterScope(masm, masm.getSecondScratchReg()) {}
   2742 
// Temporarily redirect the assembler's second scratch register to |reg|;
// the previous choice is saved and restored by the destructor.
AutoNonDefaultSecondScratchRegister::AutoNonDefaultSecondScratchRegister(
    MacroAssembler& masm, Register reg)
    : masm_(masm) {
  prevSecondScratch_ = masm.getSecondScratchReg();
  masm.setSecondScratchReg(reg);
}
   2749 
// Restore the second scratch register saved by the constructor.
AutoNonDefaultSecondScratchRegister::~AutoNonDefaultSecondScratchRegister() {
  masm_.setSecondScratchReg(prevSecondScratch_);
}
   2753 
   2754 #ifdef JS_DISASM_ARM
   2755 
   2756 /* static */
   2757 void Assembler::disassembleInstruction(const Instruction* i,
   2758                                       DisasmBuffer& buffer) {
   2759  disasm::NameConverter converter;
   2760  disasm::Disassembler dasm(converter);
   2761  uint8_t* loc = reinterpret_cast<uint8_t*>(const_cast<uint32_t*>(i->raw()));
   2762  dasm.InstructionDecode(buffer, loc);
   2763 }
   2764 
// Configure the spewer's layout (label and branch-target indentation).
void Assembler::initDisassembler() {
  // The line is normally laid out like this:
  //
  // xxxxxxxx        ldr r, op   ; comment
  //
  // where xx...x is the instruction bit pattern.
  //
  // Labels are laid out by themselves to line up with the instructions above
  // and below:
  //
  //            nnnn:
  //
  // Branch targets are normally on the same line as the branch instruction,
  // but when they cannot be they will be on a line by themselves, indented
  // significantly:
  //
  //                     -> label

  spew_.setLabelIndent("          ");             // 10
  spew_.setTargetIndent("                    ");  // 20
}
   2786 
   2787 void Assembler::finishDisassembler() { spew_.spewOrphans(); }
   2788 
   2789 // Labels are named as they are encountered by adding names to a
   2790 // table, using the Label address as the key.  This is made tricky by
   2791 // the (memory for) Label objects being reused, but reused label
   2792 // objects are recognizable from being marked as not used or not
   2793 // bound.  See spew_.refLabel().
   2794 //
   2795 // In a number of cases there is no information about the target, and
   2796 // we just end up printing "patchable constant load to PC".  This is
   2797 // true especially for jumps to bailout handlers (which have no
   2798 // names).  See allocLiteralLoadEntry() and its callers.  In some cases
   2799 // (loop back edges) some information about the intended target may be
   2800 // propagated from higher levels, and if so it's printed here.
   2801 
   2802 void Assembler::spew(Instruction* i) {
   2803  if (spew_.isDisabled() || !i) {
   2804    return;
   2805  }
   2806 
   2807  DisasmBuffer buffer;
   2808  disassembleInstruction(i, buffer);
   2809  spew_.spew("%s", buffer.start());
   2810 }
   2811 
   2812 // If a target label is known, always print that and do not attempt to
   2813 // disassemble the branch operands, as they will often be encoding
   2814 // metainformation (pointers for a chain of jump instructions), and
   2815 // not actual branch targets.
   2816 
// Spew a branch instruction. When a target label is known, print it instead
// of the branch operands in the disassembly, which often encode
// metainformation (jump-chain pointers) rather than real targets.
void Assembler::spewBranch(Instruction* i, const LabelDoc& target) {
  if (spew_.isDisabled() || !i) {
    return;
  }

  DisasmBuffer buffer;
  disassembleInstruction(i, buffer);

  // Holds the "  -> target" annotation appended after the disassembly text.
  char labelBuf[128];
  labelBuf[0] = 0;

  bool haveTarget = target.valid;
  if (!haveTarget) {
    SprintfLiteral(labelBuf, "  -> (link-time target)");
  }

  if (InstBranchImm::IsTHIS(*i)) {
    InstBranchImm* bimm = InstBranchImm::AsTHIS(*i);
    BOffImm destOff;
    bimm->extractImm(&destOff);
    if (destOff.isInvalid() || haveTarget) {
      // The target information in the instruction is likely garbage, so remove
      // it. The target label will in any case be printed if we have it.
      //
      // The format of the instruction disassembly is [0-9a-f]{8}\s+\S+\s+.*,
      // where the \S+ string is the opcode.  Strip everything after the opcode,
      // and attach the label if we have it.
      int i;  // NB: deliberately shadows the Instruction* parameter.
      // Skip the whitespace after the 8 hex digits...
      for (i = 8; i < buffer.length() && buffer[i] == ' '; i++) {
      }
      // ...then skip the opcode itself, and truncate there.
      for (; i < buffer.length() && buffer[i] != ' '; i++) {
      }
      buffer[i] = 0;
      if (haveTarget) {
        // "Nf" marks a forward reference to a label not yet bound.
        SprintfLiteral(labelBuf, "  -> %d%s", target.doc,
                       !target.bound ? "f" : "");
        haveTarget = false;
      }
    }
  }
  spew_.spew("%s%s", buffer.start(), labelBuf);

  // If the target annotation did not fit on the instruction's line, print it
  // on an indented line of its own.
  if (haveTarget) {
    spew_.spewRef(target);
  }
}
   2863 
   2864 void Assembler::spewLiteralLoad(PoolHintPun& php, bool loadToPC,
   2865                                const Instruction* i, const LiteralDoc& doc) {
   2866  if (spew_.isDisabled()) {
   2867    return;
   2868  }
   2869 
   2870  char litbuf[2048];
   2871  spew_.formatLiteral(doc, litbuf, sizeof(litbuf));
   2872 
   2873  // See patchConstantPoolLoad, above.  We assemble the instruction into a
   2874  // buffer with a zero offset, as documentation, but the offset will be
   2875  // patched later.
   2876 
   2877  uint32_t inst;
   2878  PoolHintData& data = php.phd;
   2879  switch (php.phd.getLoadType()) {
   2880    case PoolHintData::PoolDTR:
   2881      Assembler::as_dtr_patch(IsLoad, 32, Offset, data.getReg(),
   2882                              DTRAddr(pc, DtrOffImm(0)), data.getCond(), &inst);
   2883      break;
   2884    case PoolHintData::PoolBranch:
   2885      if (data.isValidPoolHint()) {
   2886        Assembler::as_dtr_patch(IsLoad, 32, Offset, pc,
   2887                                DTRAddr(pc, DtrOffImm(0)), data.getCond(),
   2888                                &inst);
   2889      }
   2890      break;
   2891    case PoolHintData::PoolVDTR:
   2892      Assembler::as_vdtr_patch(IsLoad, data.getVFPReg(),
   2893                               VFPAddr(pc, VFPOffImm(0)), data.getCond(),
   2894                               &inst);
   2895      break;
   2896 
   2897    default:
   2898      MOZ_CRASH();
   2899  }
   2900 
   2901  DisasmBuffer buffer;
   2902  disasm::NameConverter converter;
   2903  disasm::Disassembler dasm(converter);
   2904  dasm.InstructionDecode(buffer, reinterpret_cast<uint8_t*>(&inst));
   2905  spew_.spew("%s    ; .const %s", buffer.start(), litbuf);
   2906 }
   2907 
   2908 #endif  // JS_DISASM_ARM