tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MozAssembler-vixl.cpp (21418B)


      1 // Copyright 2015, ARM Limited
      2 // All rights reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions are met:
      6 //
      7 //   * Redistributions of source code must retain the above copyright notice,
      8 //     this list of conditions and the following disclaimer.
      9 //   * Redistributions in binary form must reproduce the above copyright notice,
     10 //     this list of conditions and the following disclaimer in the documentation
     11 //     and/or other materials provided with the distribution.
     12 //   * Neither the name of ARM Limited nor the names of its contributors may be
     13 //     used to endorse or promote products derived from this software without
     14 //     specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
     17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
     20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26 
#include "jit/arm64/vixl/Assembler-vixl.h"

#include <cstring>

#include "jit/Label.h"
     29 
     30 namespace vixl {
     31 
     32 using LabelDoc = js::jit::DisassemblerSpew::LabelDoc;
     33 
     34 // Assembler
// Complete code generation. No final fix-up work is required here; in debug
// builds we just record that the assembler has been finalized (presumably
// checked by emission-time assertions elsewhere — confirm in
// Assembler-vixl.h).
void Assembler::FinalizeCode() {
#ifdef DEBUG
 finalized_ = true;
#endif
}
     40 
     41 // Unbound Label Representation.
     42 //
     43 // We can have multiple branches using the same label before it is bound.
     44 // Assembler::bind() must then be able to enumerate all the branches and patch
     45 // them to target the final label location.
     46 //
     47 // When a Label is unbound with uses, its offset is pointing to the tip of a
     48 // linked list of uses. The uses can be branches or adr/adrp instructions. In
     49 // the case of branches, the next member in the linked list is simply encoded
     50 // as the branch target. For adr/adrp, the relative pc offset is encoded in the
     51 // immediate field as a signed instruction offset.
     52 //
     53 // In both cases, the end of the list is encoded as a 0 pc offset, i.e. the
     54 // tail is pointing to itself.
     55 
// Sentinel raw offset marking the tail of a label's use list: the tail
// instruction "points" at itself (see the comment above).
static const ptrdiff_t kEndOfLabelUseList = 0;
     57 
     58 BufferOffset
     59 MozBaseAssembler::NextLink(BufferOffset cur)
     60 {
     61    Instruction* link = getInstructionAt(cur);
     62    // Raw encoded offset.
     63    ptrdiff_t offset = link->ImmPCRawOffset();
     64    // End of the list is encoded as 0.
     65    if (offset == kEndOfLabelUseList)
     66        return BufferOffset();
     67    // The encoded offset is the number of instructions to move.
     68    return BufferOffset(cur.getOffset() + offset * kInstructionSize);
     69 }
     70 
     71 static ptrdiff_t
     72 EncodeOffset(BufferOffset cur, BufferOffset next)
     73 {
     74    MOZ_ASSERT(next.assigned() && cur.assigned());
     75    ptrdiff_t offset = next.getOffset() - cur.getOffset();
     76    MOZ_ASSERT(offset % kInstructionSize == 0);
     77    return offset / kInstructionSize;
     78 }
     79 
// Make the use-list entry at |cur| point at |next| by re-encoding its raw
// pc-relative immediate (stored as an instruction count, see EncodeOffset()).
void
MozBaseAssembler::SetNextLink(BufferOffset cur, BufferOffset next)
{
   Instruction* link = getInstructionAt(cur);
   link->SetImmPCRawOffset(EncodeOffset(cur, next));
}
     86 
     87 // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
     88 //
     89 // If the label is bound, returns the offset as a multiple of 1 << elementShift.
     90 // Otherwise, links the instruction to the label and returns the raw offset to
     91 // encode. (This will be an instruction count.)
     92 //
     93 // The offset is calculated by aligning the PC and label addresses down to a
     94 // multiple of 1 << elementShift, then calculating the (scaled) offset between
     95 // them. This matches the semantics of adrp, for example. (Assuming that the
     96 // assembler buffer is page-aligned, which it probably isn't.)
     97 //
     98 // For an unbound label, the returned offset will be encodable in the provided
     99 // branch range. If the label is already bound, the caller is expected to make
    100 // sure that it is in range, and emit the necessary branch instructions if it
    101 // isn't.
    102 //
ptrdiff_t
MozBaseAssembler::LinkAndGetOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
                                    unsigned elementShift, Label* label)
{
 // On OOM there is nothing sensible to link; return the end-of-list
 // sentinel so the branch encodes a harmless self-referencing offset.
 if (armbuffer_.oom())
   return kEndOfLabelUseList;

 if (label->bound()) {
   // The label is bound: all uses are already linked.
   // The returned offset is in units of (1 << elementShift) bytes, with
   // both addresses aligned down first (see the comment above).
   ptrdiff_t branch_offset = ptrdiff_t(branch.getOffset() >> elementShift);
   ptrdiff_t label_offset = ptrdiff_t(label->offset() >> elementShift);
   return label_offset - branch_offset;
 }

 // Keep track of short-range branches targeting unbound labels. We may need
 // to insert veneers in PatchShortRangeBranchToVeneer() below.
 if (branchRange < NumShortBranchRangeTypes) {
     // This is the last possible branch target.
     BufferOffset deadline(branch.getOffset() +
                           Instruction::ImmBranchMaxForwardOffset(branchRange));
     armbuffer_.registerBranchDeadline(branchRange, deadline);
 }

 // The label is unbound and previously unused: Store the offset in the label
 // itself for patching by bind().
 if (!label->used()) {
   label->use(branch.getOffset());
   return kEndOfLabelUseList;
 }

 // The label is unbound and has multiple users. Create a linked list between
 // the branches, and update the linked list head in the label struct. This is
 // not always trivial since the branches in the linked list have limited
 // ranges.

 // What is the earliest buffer offset that would be reachable by the branch
 // we're about to add?
 ptrdiff_t earliestReachable =
   branch.getOffset() + Instruction::ImmBranchMinBackwardOffset(branchRange);

 // If the existing instruction at the head of the list is within reach of the
 // new branch, we can simply insert the new branch at the front of the list.
 if (label->offset() >= earliestReachable) {
     ptrdiff_t offset = EncodeOffset(branch, BufferOffset(label));
     label->use(branch.getOffset());
     MOZ_ASSERT(offset != kEndOfLabelUseList);
     return offset;
 }

 // The label already has a linked list of uses, but we can't reach the head
 // of the list with the allowed branch range. Insert this branch at a
 // different position in the list.
 //
 // Find an existing branch, exbr, such that:
 //
 // 1.  The new branch can be reached by exbr, and either
 // 2a. The new branch can reach exbr's target, or
 // 2b. The exbr branch is at the end of the list.
 //
 // Then the new branch can be inserted after exbr in the linked list.
 //
 // We know that it is always possible to find an exbr branch satisfying these
 // conditions because of the PatchShortRangeBranchToVeneer() mechanism. All
 // branches are guaranteed to either be able to reach the end of the
 // assembler buffer, or they will be pointing to an unconditional branch that
 // can.
 //
 // In particular, the end of the list is always a viable candidate, so we'll
 // just get that.
 BufferOffset next(label);
 BufferOffset exbr;
 do {
     exbr = next;
     next = NextLink(next);
 } while (next.assigned());
 SetNextLink(exbr, branch);

 // This branch becomes the new end of the list.
 return kEndOfLabelUseList;
}
    183 
// Byte-granularity variant (elementShift == 0); used by adr.
ptrdiff_t MozBaseAssembler::LinkAndGetByteOffsetTo(BufferOffset branch, Label* label) {
 return LinkAndGetOffsetTo(branch, UncondBranchRangeType, 0, label);
}

// Instruction-granularity variant (elementShift == kInstructionSizeLog2);
// used by the immediate branch instructions.
ptrdiff_t MozBaseAssembler::LinkAndGetInstructionOffsetTo(BufferOffset branch,
                                                         ImmBranchRangeType branchRange,
                                                         Label* label) {
 return LinkAndGetOffsetTo(branch, branchRange, kInstructionSizeLog2, label);
}

// Page-granularity variant (elementShift == kPageSizeLog2); used by adrp.
ptrdiff_t MozBaseAssembler::LinkAndGetPageOffsetTo(BufferOffset branch, Label* label) {
 return LinkAndGetOffsetTo(branch, UncondBranchRangeType, kPageSizeLog2, label);
}
    197 
// Unconditional branch to a signed 26-bit instruction offset.
BufferOffset Assembler::b(int imm26, const LabelDoc& doc) {
 return EmitBranch(B | ImmUncondBranch(imm26), doc);
}


// Patch the instruction at |at| to be an unconditional branch.
void Assembler::b(Instruction* at, int imm26) {
 return EmitBranch(at, B | ImmUncondBranch(imm26));
}


// Conditional branch to a signed 19-bit instruction offset.
BufferOffset Assembler::b(int imm19, Condition cond, const LabelDoc& doc) {
 return EmitBranch(B_cond | ImmCondBranch(imm19) | cond, doc);
}


// Patch the instruction at |at| to be a conditional branch.
void Assembler::b(Instruction* at, int imm19, Condition cond) {
 EmitBranch(at, B_cond | ImmCondBranch(imm19) | cond);
}


BufferOffset Assembler::b(Label* label) {
 // Encode the relative offset from the inserted branch to the label.
 LabelDoc doc = refLabel(label);
 return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), UncondBranchRangeType, label), doc);
}


BufferOffset Assembler::b(Label* label, Condition cond) {
 // Encode the relative offset from the inserted branch to the label.
 LabelDoc doc = refLabel(label);
 return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), cond, doc);
}
    230 
// Indirect branch: patch |at| to jump to the address held in register xn.
void Assembler::br(Instruction* at, const Register& xn) {
 VIXL_ASSERT(xn.Is64Bits());
 // No need for EmitBranch(): no immediate offset needs fixing.
 Emit(at, BR | Rn(xn));
}


// Indirect call: patch |at| to branch-with-link to the address held in xn.
void Assembler::blr(Instruction* at, const Register& xn) {
 VIXL_ASSERT(xn.Is64Bits());
 // No need for EmitBranch(): no immediate offset needs fixing.
 Emit(at, BLR | Rn(xn));
}
    243 
    244 
// Branch with link (call) to a signed 26-bit instruction offset.
void Assembler::bl(int imm26, const LabelDoc& doc) {
 EmitBranch(BL | ImmUncondBranch(imm26), doc);
}


// Patch the instruction at |at| to be a BL with the given offset.
void Assembler::bl(Instruction* at, int imm26) {
 EmitBranch(at, BL | ImmUncondBranch(imm26));
}


void Assembler::bl(Label* label) {
 // Encode the relative offset from the inserted branch to the label.
 LabelDoc doc = refLabel(label);
 return bl(LinkAndGetInstructionOffsetTo(nextInstrOffset(), UncondBranchRangeType, label), doc);
}
    260 
    261 
// Compare-and-branch-if-zero, signed 19-bit instruction offset (CBZ).
void Assembler::cbz(const Register& rt, int imm19, const LabelDoc& doc) {
 EmitBranch(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt), doc);
}


// Patch the instruction at |at| to be a CBZ with the given offset.
void Assembler::cbz(Instruction* at, const Register& rt, int imm19) {
 EmitBranch(at, SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbz(const Register& rt, Label* label) {
 // Encode the relative offset from the inserted branch to the label.
 LabelDoc doc = refLabel(label);
 return cbz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), doc);
}


// Compare-and-branch-if-non-zero (CBNZ); same encoding shape as CBZ.
void Assembler::cbnz(const Register& rt, int imm19, const LabelDoc& doc) {
 EmitBranch(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt), doc);
}


// Patch the instruction at |at| to be a CBNZ with the given offset.
void Assembler::cbnz(Instruction* at, const Register& rt, int imm19) {
 EmitBranch(at, SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbnz(const Register& rt, Label* label) {
 // Encode the relative offset from the inserted branch to the label.
 LabelDoc doc = refLabel(label);
 return cbnz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), doc);
}
    294 
    295 
// Test-bit-and-branch-if-zero, signed 14-bit instruction offset (TBZ).
void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc) {
 VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
 EmitBranch(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt), doc);
}


// Patch the instruction at |at| to be a TBZ with the given offset.
void Assembler::tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
 VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
 EmitBranch(at, TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
 // Encode the relative offset from the inserted branch to the label.
 LabelDoc doc = refLabel(label);
 return tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), TestBranchRangeType, label), doc);
}


// Test-bit-and-branch-if-non-zero (TBNZ); same encoding shape as TBZ.
void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc) {
 VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
 EmitBranch(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt), doc);
}


// Patch the instruction at |at| to be a TBNZ with the given offset.
void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
 VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
 EmitBranch(at, TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
 // Encode the relative offset from the inserted branch to the label.
 LabelDoc doc = refLabel(label);
 return tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), TestBranchRangeType, label), doc);
}
    332 
    333 
// ADR: form a pc-relative address (signed 21-bit byte offset) in rd.
void Assembler::adr(const Register& rd, int imm21, const LabelDoc& doc) {
 VIXL_ASSERT(rd.Is64Bits());
 EmitBranch(ADR | ImmPCRelAddress(imm21) | Rd(rd), doc);
}


// Patch the instruction at |at| to be an ADR with the given offset.
void Assembler::adr(Instruction* at, const Register& rd, int imm21) {
 VIXL_ASSERT(rd.Is64Bits());
 EmitBranch(at, ADR | ImmPCRelAddress(imm21) | Rd(rd));
}


void Assembler::adr(const Register& rd, Label* label) {
 // Encode the relative offset from the inserted adr to the label.
 LabelDoc doc = refLabel(label);
 return adr(rd, LinkAndGetByteOffsetTo(nextInstrOffset(), label), doc);
}


// ADRP: form a pc-relative page address (signed 21-bit page offset) in rd.
void Assembler::adrp(const Register& rd, int imm21, const LabelDoc& doc) {
 VIXL_ASSERT(rd.Is64Bits());
 EmitBranch(ADRP | ImmPCRelAddress(imm21) | Rd(rd), doc);
}


// Patch the instruction at |at| to be an ADRP with the given offset.
void Assembler::adrp(Instruction* at, const Register& rd, int imm21) {
 VIXL_ASSERT(rd.Is64Bits());
 EmitBranch(at, ADRP | ImmPCRelAddress(imm21) | Rd(rd));
}


void Assembler::adrp(const Register& rd, Label* label) {
 VIXL_ASSERT(AllowPageOffsetDependentCode());
 // Encode the relative offset from the inserted adr to the label.
 LabelDoc doc = refLabel(label);
 return adrp(rd, LinkAndGetPageOffsetTo(nextInstrOffset(), label), doc);
}
    371 
    372 
// Logical AND, setting condition flags (ANDS).
BufferOffset Assembler::ands(const Register& rd, const Register& rn, const Operand& operand) {
 return Logical(rd, rn, operand, ANDS);
}


// TST is ANDS with the zero register as destination: only the flags change.
BufferOffset Assembler::tst(const Register& rn, const Operand& operand) {
 return ands(AppropriateZeroRegFor(rn), rn, operand);
}
    381 
    382 
// Patch |at| to be a pc-relative literal load with a 19-bit literal offset;
// the opcode is chosen from the register type.
void Assembler::ldr(Instruction* at, const CPURegister& rt, int imm19) {
 LoadLiteralOp op = LoadLiteralOpFor(rt);
 Emit(at, op | ImmLLiteral(imm19) | Rt(rt));
}

// As above, but always the sign-extending 32-bit literal load (LDRSW).
void Assembler::ldrsw(Instruction* at, const CPURegister& rt, int imm19) {
 Emit(at, LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt));
}
    391 
// Emit a HINT instruction with the given hint code.
BufferOffset Assembler::hint(SystemHint code) {
 return Emit(HINT | ImmHint(code));
}


// Patch the instruction at |at| to be a HINT.
void Assembler::hint(Instruction* at, SystemHint code) {
 Emit(at, HINT | ImmHint(code));
}


// Patch |at| to be a supervisor call (SVC) with a 16-bit immediate payload.
void Assembler::svc(Instruction* at, int code) {
 VIXL_ASSERT(IsUint16(code));
 Emit(at, SVC | ImmException(code));
}


// NOP is a hint encoding.
void Assembler::nop(Instruction* at) {
 hint(at, NOP);
}


// CSDB (speculation barrier) is likewise a hint encoding.
void Assembler::csdb(Instruction* at) {
 hint(at, CSDB);
}
    416 
    417 
    418 BufferOffset Assembler::Logical(const Register& rd, const Register& rn,
    419                                const Operand& operand, LogicalOp op)
    420 {
    421  VIXL_ASSERT(rd.size() == rn.size());
    422  if (operand.IsImmediate()) {
    423    int64_t immediate = operand.immediate();
    424    unsigned reg_size = rd.size();
    425 
    426    VIXL_ASSERT(immediate != 0);
    427    VIXL_ASSERT(immediate != -1);
    428    VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));
    429 
    430    // If the operation is NOT, invert the operation and immediate.
    431    if ((op & NOT) == NOT) {
    432      op = static_cast<LogicalOp>(op & ~NOT);
    433      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    434    }
    435 
    436    unsigned n, imm_s, imm_r;
    437    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
    438      // Immediate can be encoded in the instruction.
    439      return LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    440    } else {
    441      // This case is handled in the macro assembler.
    442      VIXL_UNREACHABLE();
    443    }
    444  } else {
    445    VIXL_ASSERT(operand.IsShiftedRegister());
    446    VIXL_ASSERT(operand.reg().size() == rd.size());
    447    Instr dp_op = static_cast<Instr>(static_cast<Instr>(op) | LogicalShiftedFixed);
    448    return DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
    449  }
    450 }
    451 
    452 
    453 BufferOffset Assembler::LogicalImmediate(const Register& rd, const Register& rn,
    454                                         unsigned n, unsigned imm_s, unsigned imm_r, LogicalOp op)
    455 {
    456    unsigned reg_size = rd.size();
    457    Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
    458    return Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
    459                ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | Rn(rn));
    460 }
    461 
    462 
    463 BufferOffset Assembler::DataProcShiftedRegister(const Register& rd, const Register& rn,
    464                                                const Operand& operand, FlagsUpdate S, Instr op)
    465 {
    466  VIXL_ASSERT(operand.IsShiftedRegister());
    467  VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() && IsUint5(operand.shift_amount())));
    468  return Emit(SF(rd) | op | Flags(S) |
    469              ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
    470              Rm(operand.reg()) | Rn(rn) | Rd(rd));
    471 }
    472 
    473 
    474 void MozBaseAssembler::InsertIndexIntoTag(uint8_t* load, uint32_t index) {
    475  // Store the js::jit::PoolEntry index into the instruction.
    476  // finishPool() will walk over all literal load instructions
    477  // and use PatchConstantPoolLoad() to patch to the final relative offset.
    478  *((uint32_t*)load) |= Assembler::ImmLLiteral(index);
    479 }
    480 
    481 
// Patch a tagged literal-load instruction to reference its slot in the
// finished constant pool. The return value is unused (see below).
bool MozBaseAssembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
 Instruction* load = reinterpret_cast<Instruction*>(loadAddr);

 // The load currently contains the js::jit::PoolEntry's index,
 // as written by InsertIndexIntoTag().
 uint32_t index = load->ImmLLiteral();

 // Each entry in the literal pool is uint32_t-sized,
 // but literals may use multiple entries.
 uint32_t* constPool = reinterpret_cast<uint32_t*>(constPoolAddr);
 Instruction* source = reinterpret_cast<Instruction*>(&constPool[index]);

 // Re-encode the load's immediate as the pc-relative offset to |source|.
 load->SetImmLLiteral(source);
 return false; // Nothing uses the return value.
}
    497 
// Redirect a short-range branch that is about to go out of range through an
// unconditional branch (the veneer), splicing the veneer into the branch's
// label use list so bind() still patches everything.
void
MozBaseAssembler::PatchShortRangeBranchToVeneer(ARMBuffer* buffer, unsigned rangeIdx,
                                               BufferOffset deadline, BufferOffset veneer)
{
 // Reconstruct the position of the branch from (rangeIdx, deadline).
 vixl::ImmBranchRangeType branchRange = static_cast<vixl::ImmBranchRangeType>(rangeIdx);
 BufferOffset branch(deadline.getOffset() - Instruction::ImmBranchMaxForwardOffset(branchRange));
 Instruction *branchInst = buffer->getInst(branch);
 Instruction *veneerInst = buffer->getInst(veneer);

 // Verify that the branch range matches what's encoded.
 MOZ_ASSERT(Instruction::ImmBranchTypeToRange(branchInst->BranchType()) == branchRange);

 // We want to insert veneer after branch in the linked list of instructions
 // that use the same unbound label.
 // The veneer should be an unconditional branch.
 ptrdiff_t nextElemOffset = branchInst->ImmPCRawOffset();

 // If offset is 0, this is the end of the linked list.
 if (nextElemOffset != kEndOfLabelUseList) {
     // Make the offset relative to veneer so it targets the same instruction
     // as branchInst. (Scale to bytes, rebase, then scale back to an
     // instruction count.)
     nextElemOffset *= kInstructionSize;
     nextElemOffset += branch.getOffset() - veneer.getOffset();
     nextElemOffset /= kInstructionSize;
 }
 Assembler::b(veneerInst, nextElemOffset);

 // Now point branchInst at veneer. See also SetNextLink() above.
 branchInst->SetImmPCRawOffset(EncodeOffset(branch, veneer));
}
    529 
// One-word header written in front of each constant pool, encoding the pool
// size and whether the pool's guard branch is "natural".
struct PoolHeader {
 uint32_t data;

 struct Header {
   // The size should take into account the pool header.
   // The size is in units of Instruction (4bytes), not byte.
   union {
     struct {
       uint32_t size : 15;

       // "Natural" guards are part of the normal instruction stream,
       // while "non-natural" guards are inserted for the sole purpose
       // of skipping around a pool.
       uint32_t isNatural : 1;

       // Always 0xffff; presumably marks the word so a pool header can be
       // distinguished from a real instruction — confirm against the
       // consumers of these headers.
       uint32_t ONES : 16;
     };
     // Raw view of the bitfields above. NOTE(review): reading the inactive
     // union member is the type-punning idiom used throughout this code.
     uint32_t data;
   };

   Header(int size_, bool isNatural_)
     : size(size_),
       isNatural(isNatural_),
       ONES(0xffff)
   { }

   // Reconstruct a header from its raw word, sanity-checking the tag bits.
   Header(uint32_t data)
     : data(data)
   {
     VIXL_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
     VIXL_ASSERT(ONES == 0xffff);
   }

   uint32_t raw() const {
     VIXL_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
     return data;
   }
 };

 PoolHeader(int size_, bool isNatural_)
   : data(Header(size_, isNatural_).raw())
 { }

 // Pool size in Instruction units, including this header.
 uint32_t size() const {
   Header tmp(data);
   return tmp.size;
 }

 uint32_t isNatural() const {
   Header tmp(data);
   return tmp.isNatural;
 }
};
    582 
    583 
    584 void MozBaseAssembler::WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural) {
    585  static_assert(sizeof(PoolHeader) == 4);
    586 
    587  // Get the total size of the pool.
    588  const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize();
    589  const uintptr_t totalPoolInstructions = totalPoolSize / kInstructionSize;
    590 
    591  VIXL_ASSERT((totalPoolSize & 0x3) == 0);
    592  VIXL_ASSERT(totalPoolInstructions < (1 << 15));
    593 
    594  PoolHeader header(totalPoolInstructions, isNatural);
    595  *(PoolHeader*)start = header;
    596 }
    597 
    598 
    599 void MozBaseAssembler::WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural) {
    600  return;
    601 }
    602 
    603 
    604 void MozBaseAssembler::WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest) {
    605  int byteOffset = dest.getOffset() - branch.getOffset();
    606  VIXL_ASSERT(byteOffset % kInstructionSize == 0);
    607 
    608  int instOffset = byteOffset >> kInstructionSizeLog2;
    609  Assembler::b(inst, instOffset);
    610 }
    611 
    612 
    613 }  // namespace vixl