tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

testAssemblerBuffer.cpp (21679B)


      1 /* This Source Code Form is subject to the terms of the Mozilla Public
      2 * License, v. 2.0. If a copy of the MPL was not distributed with this
      3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      4 
      5 #include "jit/shared/IonAssemblerBufferWithConstantPools.h"
      6 #include "jsapi-tests/tests.h"
      7 
      8 // Tests for classes in:
      9 //
     10 //   jit/shared/IonAssemblerBuffer.h
     11 //   jit/shared/IonAssemblerBufferWithConstantPools.h
     12 //
     13 // Classes in js::jit tested:
     14 //
     15 //   BufferOffset
     16 //   BufferSlice (implicitly)
     17 //   AssemblerBuffer
     18 //
     19 //   BranchDeadlineSet
     20 //   Pool (implicitly)
     21 //   AssemblerBufferWithConstantPools
     22 //
     23 
     24 BEGIN_TEST(testAssemblerBuffer_BufferOffset) {
     25  using js::jit::BufferOffset;
     26 
     27  BufferOffset off1;
     28  BufferOffset off2(10);
     29 
     30  CHECK(!off1.assigned());
     31  CHECK(off2.assigned());
     32  CHECK_EQUAL(off2.getOffset(), 10);
     33  off1 = off2;
     34  CHECK(off1.assigned());
     35  CHECK_EQUAL(off1.getOffset(), 10);
     36 
     37  return true;
     38 }
     39 END_TEST(testAssemblerBuffer_BufferOffset)
     40 
     41 BEGIN_TEST(testAssemblerBuffer_AssemblerBuffer) {
     42  using js::jit::BufferOffset;
     43  using AsmBuf = js::jit::AssemblerBuffer<5 * sizeof(uint32_t), uint32_t>;
     44 
     45  AsmBuf ab;
     46  CHECK(ab.isAligned(16));
     47  CHECK_EQUAL(ab.size(), 0u);
     48  CHECK_EQUAL(ab.nextOffset().getOffset(), 0);
     49  CHECK(!ab.oom());
     50 
     51  BufferOffset off1 = ab.putInt(1000017);
     52  CHECK_EQUAL(off1.getOffset(), 0);
     53  CHECK_EQUAL(ab.size(), 4u);
     54  CHECK_EQUAL(ab.nextOffset().getOffset(), 4);
     55  CHECK(!ab.isAligned(16));
     56  CHECK(ab.isAligned(4));
     57  CHECK(ab.isAligned(1));
     58  CHECK_EQUAL(*ab.getInst(off1), 1000017u);
     59 
     60  BufferOffset off2 = ab.putInt(1000018);
     61  CHECK_EQUAL(off2.getOffset(), 4);
     62 
     63  BufferOffset off3 = ab.putInt(1000019);
     64  CHECK_EQUAL(off3.getOffset(), 8);
     65 
     66  BufferOffset off4 = ab.putInt(1000020);
     67  CHECK_EQUAL(off4.getOffset(), 12);
     68  CHECK_EQUAL(ab.size(), 16u);
     69  CHECK_EQUAL(ab.nextOffset().getOffset(), 16);
     70 
     71  // Last one in the slice.
     72  BufferOffset off5 = ab.putInt(1000021);
     73  CHECK_EQUAL(off5.getOffset(), 16);
     74  CHECK_EQUAL(ab.size(), 20u);
     75  CHECK_EQUAL(ab.nextOffset().getOffset(), 20);
     76 
     77  BufferOffset off6 = ab.putInt(1000022);
     78  CHECK_EQUAL(off6.getOffset(), 20);
     79  CHECK_EQUAL(ab.size(), 24u);
     80  CHECK_EQUAL(ab.nextOffset().getOffset(), 24);
     81 
     82  // Reference previous slice. Excercise the finger.
     83  CHECK_EQUAL(*ab.getInst(off1), 1000017u);
     84  CHECK_EQUAL(*ab.getInst(off6), 1000022u);
     85  CHECK_EQUAL(*ab.getInst(off1), 1000017u);
     86  CHECK_EQUAL(*ab.getInst(off5), 1000021u);
     87 
     88  // Too much data for one slice.
     89  const uint32_t fixdata[] = {2000036, 2000037, 2000038,
     90                              2000039, 2000040, 2000041};
     91 
     92  // Split payload across multiple slices.
     93  CHECK_EQUAL(ab.nextOffset().getOffset(), 24);
     94  BufferOffset good1 = ab.putBytesLarge(sizeof(fixdata), fixdata);
     95  CHECK_EQUAL(good1.getOffset(), 24);
     96  CHECK_EQUAL(ab.nextOffset().getOffset(), 48);
     97  CHECK_EQUAL(*ab.getInst(good1), 2000036u);
     98  CHECK_EQUAL(*ab.getInst(BufferOffset(32)), 2000038u);
     99  CHECK_EQUAL(*ab.getInst(BufferOffset(36)), 2000039u);
    100  CHECK_EQUAL(*ab.getInst(BufferOffset(40)), 2000040u);
    101  CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 2000041u);
    102 
    103  return true;
    104 }
    105 END_TEST(testAssemblerBuffer_AssemblerBuffer)
    106 
BEGIN_TEST(testAssemblerBuffer_BranchDeadlineSet) {
  // BranchDeadlineSet<3> tracks pending branch deadlines in three
  // independent range categories (0..2), reporting the globally earliest.
  using DLSet = js::jit::BranchDeadlineSet<3>;
  using js::jit::BufferOffset;

  js::LifoAlloc alloc(1024, js::MallocArena);
  DLSet dls(alloc);

  CHECK(dls.empty());
  CHECK(alloc.isEmpty());  // Constructor must be infallible (no allocation).
  CHECK_EQUAL(dls.size(), 0u);
  CHECK_EQUAL(dls.maxRangeSize(), 0u);

  // Removing a non-existent deadline is OK.
  dls.removeDeadline(1, BufferOffset(7));

  // Add deadlines in increasing order as intended. This is optimal.
  dls.addDeadline(1, BufferOffset(10));
  CHECK(!dls.empty());
  CHECK_EQUAL(dls.size(), 1u);
  CHECK_EQUAL(dls.maxRangeSize(), 1u);
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 1u);

  // Removing a non-existent deadline is OK: wrong offset, future offset,
  // or wrong range must all leave the set untouched.
  dls.removeDeadline(1, BufferOffset(7));
  dls.removeDeadline(1, BufferOffset(17));
  dls.removeDeadline(0, BufferOffset(10));
  CHECK_EQUAL(dls.size(), 1u);
  CHECK_EQUAL(dls.maxRangeSize(), 1u);

  // Two identical deadlines for different ranges.
  dls.addDeadline(2, BufferOffset(10));
  CHECK(!dls.empty());
  CHECK_EQUAL(dls.size(), 2u);
  CHECK_EQUAL(dls.maxRangeSize(), 1u);
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);

  // It doesn't matter which range earliestDeadlineRange() reports first,
  // but it must report both.
  if (dls.earliestDeadlineRange() == 1) {
    dls.removeDeadline(1, BufferOffset(10));
    CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
    CHECK_EQUAL(dls.earliestDeadlineRange(), 2u);
  } else {
    CHECK_EQUAL(dls.earliestDeadlineRange(), 2u);
    dls.removeDeadline(2, BufferOffset(10));
    CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
    CHECK_EQUAL(dls.earliestDeadlineRange(), 1u);
  }

  // Add deadline which is the front of range 0, but not the global earliest.
  dls.addDeadline(0, BufferOffset(20));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Non-optimal add to front of single-entry range 0 (out-of-order insert).
  dls.addDeadline(0, BufferOffset(15));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Append to 2-entry range 0.
  dls.addDeadline(0, BufferOffset(30));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Add penultimate entry (another out-of-order insert).
  dls.addDeadline(0, BufferOffset(25));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Prepend, stealing the global earliest from the other range.
  dls.addDeadline(0, BufferOffset(5));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 5);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Remove central element; earliest is unaffected.
  dls.removeDeadline(0, BufferOffset(20));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 5);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Remove front, giving back the lead to the other range.
  dls.removeDeadline(0, BufferOffset(5));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
  CHECK(dls.earliestDeadlineRange() > 0);

  // Remove front, giving back earliest to range 0.
  dls.removeDeadline(dls.earliestDeadlineRange(), BufferOffset(10));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 15);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Remove tail; earliest is unaffected.
  dls.removeDeadline(0, BufferOffset(30));
  CHECK_EQUAL(dls.earliestDeadline().getOffset(), 15);
  CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);

  // Now range 0 = [15, 25]. Drain it and verify the set is empty again.
  CHECK_EQUAL(dls.size(), 2u);
  dls.removeDeadline(0, BufferOffset(25));
  dls.removeDeadline(0, BufferOffset(15));
  CHECK(dls.empty());

  return true;
}
END_TEST(testAssemblerBuffer_BranchDeadlineSet)
    211 
// Mock Assembler class for testing the AssemblerBufferWithConstantPools
// callbacks.
namespace {

struct TestAssembler;

// Pool-aware buffer under test: 5-instruction slices of 4-byte
// instructions, with 3 short-branch range categories.
using AsmBufWithPool = js::jit::AssemblerBufferWithConstantPools<
    /* SliceSize */ 5 * sizeof(uint32_t),
    /* InstSize */ 4,
    /* Inst */ uint32_t,
    /* Asm */ TestAssembler,
    /* NumShortBranchRanges */ 3>;

struct TestAssembler {
  // Mock instruction set:
  //
  //   0x1111xxxx - align filler instructions.
  //   0x2222xxxx - manually inserted 'arith' instructions.
  //   0xaaaaxxxx - noop filler instruction.
  //   0xb0bbxxxx - branch xxxx bytes forward. (Pool guard).
  //   0xb1bbxxxx - branch xxxx bytes forward. (Short-range branch).
  //   0xb2bbxxxx - branch xxxx bytes forward. (Veneer branch).
  //   0xb3bbxxxx - branch xxxx bytes forward. (Patched short-range branch).
  //   0xc0ccxxxx - constant pool load (uninitialized).
  //   0xc1ccxxxx - constant pool load to index xxxx.
  //   0xc2ccxxxx - constant pool load xxxx bytes ahead.
  //   0xffffxxxx - pool header with xxxx bytes.

  // Maximum forward reach, in bytes, of a 0xb1bb short-range branch.
  static const unsigned BranchRange = 36;

  // Callback: a load was added to the pending pool. Tag the uninitialized
  // load (0xc0cc0000) with its pool-local entry index so
  // PatchConstantPoolLoad() can locate the datum later.
  static void InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
    uint32_t* load = reinterpret_cast<uint32_t*>(load_);
    MOZ_ASSERT(*load == 0xc0cc0000,
               "Expected uninitialized constant pool load");
    MOZ_ASSERT(index < 0x10000);
    *load = 0xc1cc0000 + index;
  }

  // Callback: the pool has been placed. Rewrite the index-tagged load
  // (0xc1cc + index) into a pc-relative load (0xc2cc + byte offset) of its
  // entry within the emitted pool.
  static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
    uint32_t* load = reinterpret_cast<uint32_t*>(loadAddr);
    uint32_t index = *load & 0xffff;
    MOZ_ASSERT(*load == (0xc1cc0000 | index),
               "Expected constant pool load(index)");
    // Byte distance from the load to the start of the pool data, plus the
    // entry's position within the pool (4 bytes per index).
    ptrdiff_t offset = reinterpret_cast<uint8_t*>(constPoolAddr) -
                       reinterpret_cast<uint8_t*>(loadAddr);
    offset += index * 4;
    MOZ_ASSERT(offset % 4 == 0, "Unaligned constant pool");
    MOZ_ASSERT(offset > 0 && offset < 0x10000, "Pool out of range");
    *load = 0xc2cc0000 + offset;
  }

  // Callback: write the guard branch that jumps execution over an emitted
  // pool, from `branch` to the first instruction after the pool.
  static void WritePoolGuard(js::jit::BufferOffset branch, uint32_t* dest,
                             js::jit::BufferOffset afterPool) {
    MOZ_ASSERT(branch.assigned());
    MOZ_ASSERT(afterPool.assigned());
    size_t branchOff = branch.getOffset();
    size_t afterPoolOff = afterPool.getOffset();
    MOZ_ASSERT(afterPoolOff > branchOff);
    uint32_t delta = afterPoolOff - branchOff;
    *dest = 0xb0bb0000 + delta;
  }

  // Callback: write the pool header recording the pool's size in bytes.
  // This mock only supports forced (non-natural) pool dumps.
  static void WritePoolHeader(void* start, js::jit::Pool* p, bool isNatural) {
    MOZ_ASSERT(!isNatural, "Natural pool guards not implemented.");
    uint32_t* hdr = reinterpret_cast<uint32_t*>(start);
    *hdr = 0xffff0000 + p->getPoolSize();
  }

  // Callback: a short-range branch is about to go out of range. Redirect it
  // to a long-range veneer at `veneer` carrying the original target bits.
  // The branch is located by working back BranchRange bytes from its
  // registered deadline.
  static void PatchShortRangeBranchToVeneer(AsmBufWithPool* buffer,
                                            unsigned rangeIdx,
                                            js::jit::BufferOffset deadline,
                                            js::jit::BufferOffset veneer) {
    size_t branchOff = deadline.getOffset() - BranchRange;
    size_t veneerOff = veneer.getOffset();
    uint32_t* branch = buffer->getInst(js::jit::BufferOffset(branchOff));

    MOZ_ASSERT((*branch & 0xffff0000) == 0xb1bb0000,
               "Expected short-range branch instruction");
    // Copy branch offset to veneer. A real instruction set would require
    // some adjustment of the label linked-list.
    *buffer->getInst(veneer) = 0xb2bb0000 | (*branch & 0xffff);
    MOZ_ASSERT(veneerOff > branchOff, "Veneer should follow branch");
    *branch = 0xb3bb0000 + (veneerOff - branchOff);
  }
};
}  // namespace
    298 
BEGIN_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools) {
  using js::jit::BufferOffset;

  // poolMaxOffset=17 means a load can reach at most 17 bytes ahead, which
  // forces frequent pool dumps in the scenarios below.
  AsmBufWithPool ab(/* guardSize= */ 1,
                    /* headerSize= */ 1,
                    /* instBufferAlign(unused)= */ 0,
                    /* poolMaxOffset= */ 17,
                    /* pcBias= */ 0,
                    /* alignFillInst= */ 0x11110000,
                    /* nopFillInst= */ 0xaaaa0000,
                    /* nopFill= */ 0);

  CHECK(ab.isAligned(16));
  CHECK_EQUAL(ab.size(), 0u);
  CHECK_EQUAL(ab.nextOffset().getOffset(), 0);
  CHECK(!ab.oom());

  // Each slice holds 5 instructions. Trigger a constant pool inside the slice.
  uint32_t poolLoad[] = {0xc0cc0000};
  uint32_t poolData[] = {0xdddd0000, 0xdddd0001, 0xdddd0002, 0xdddd0003};
  AsmBufWithPool::PoolEntry pe;
  BufferOffset load =
      ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(), 0u);
  CHECK_EQUAL(load.getOffset(), 0);

  // Pool hasn't been emitted yet. Load has been patched by
  // InsertIndexIntoTag.
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);

  // Expected layout:
  //
  //   0: load [pc+16]
  //   4: 0x22220001
  //   8: guard branch pc+12
  //  12: pool header
  //  16: poolData
  //  20: 0x22220002
  //
  ab.putInt(0x22220001);
  // One could argue that the pool should be flushed here since there is no
  // more room. However, the current implementation doesn't dump pool until
  // asked to add data:
  ab.putInt(0x22220002);

  // Verify the layout sketched above, instruction by instruction.
  CHECK_EQUAL(*ab.getInst(BufferOffset(0)), 0xc2cc0010u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(4)), 0x22220001u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(8)), 0xb0bb000cu);
  CHECK_EQUAL(*ab.getInst(BufferOffset(12)), 0xffff0004u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(16)), 0xdddd0000u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(20)), 0x22220002u);

  // allocEntry() overwrites the load instruction! Restore the original.
  poolLoad[0] = 0xc0cc0000;

  // Now try with load and pool data on separate slices.
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(), 1u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 24);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.
  ab.putInt(0x22220001);
  ab.putInt(0x22220002);
  // Same layout as before, shifted by 24 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(24)), 0xc2cc0010u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(28)), 0x22220001u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(32)), 0xb0bb000cu);
  CHECK_EQUAL(*ab.getInst(BufferOffset(36)), 0xffff0004u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(40)), 0xdddd0000u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 0x22220002u);

  // Two adjacent loads to the same pool.
  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(), 2u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 48);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.

  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)(poolData + 1), &pe);
  CHECK_EQUAL(pe.index(), 3u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 52);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0001);  // Index into current pool.

  ab.putInt(0x22220005);

  CHECK_EQUAL(*ab.getInst(BufferOffset(48)), 0xc2cc0010u);  // load pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(52)), 0xc2cc0010u);  // load pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(56)),
              0xb0bb0010u);  // guard branch pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(60)), 0xffff0008u);  // header 8 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(64)), 0xdddd0000u);  // datum 1.
  CHECK_EQUAL(*ab.getInst(BufferOffset(68)), 0xdddd0001u);  // datum 2.
  CHECK_EQUAL(*ab.getInst(BufferOffset(72)),
              0x22220005u);  // putInt(0x22220005)

  // Two loads as above, but the first load has an 8-byte pool entry, and the
  // second load wouldn't be able to reach its data. This must produce two
  // pools.
  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 2, (uint8_t*)poolLoad, (uint8_t*)(poolData + 2), &pe);
  CHECK_EQUAL(pe.index(), 4u);  // Global pool entry index.
  CHECK_EQUAL(load.getOffset(), 76);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.

  poolLoad[0] = 0xc0cc0000;
  load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
  CHECK_EQUAL(pe.index(),
              6u);  // Global pool entry index. (Prev one is two indexes).
  CHECK_EQUAL(load.getOffset(), 96);
  CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);  // Index into current pool.

  // First pool was flushed between the two loads above.
  CHECK_EQUAL(*ab.getInst(BufferOffset(76)), 0xc2cc000cu);  // load pc+12.
  CHECK_EQUAL(*ab.getInst(BufferOffset(80)),
              0xb0bb0010u);  // guard branch pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(84)), 0xffff0008u);  // header 8 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(88)), 0xdddd0002u);  // datum 1.
  CHECK_EQUAL(*ab.getInst(BufferOffset(92)), 0xdddd0003u);  // datum 2.

  // Second pool is not flushed yet, and there is room for one instruction
  // after the load. Test the keep-together feature.
  ab.enterNoPool(2);
  ab.putInt(0x22220006);
  ab.putInt(0x22220007);
  ab.leaveNoPool();

  // The no-pool region forced the second pool out before both putInts.
  CHECK_EQUAL(*ab.getInst(BufferOffset(96)), 0xc2cc000cu);  // load pc+16.
  CHECK_EQUAL(*ab.getInst(BufferOffset(100)),
              0xb0bb000cu);  // guard branch pc+12.
  CHECK_EQUAL(*ab.getInst(BufferOffset(104)), 0xffff0004u);  // header 4 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(108)), 0xdddd0000u);  // datum 1.
  CHECK_EQUAL(*ab.getInst(BufferOffset(112)), 0x22220006u);
  CHECK_EQUAL(*ab.getInst(BufferOffset(116)), 0x22220007u);

  return true;
}
END_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools)
    434 
BEGIN_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools_ShortBranch) {
  using js::jit::BufferOffset;

  AsmBufWithPool ab(/* guardSize= */ 1,
                    /* headerSize= */ 1,
                    /* instBufferAlign(unused)= */ 0,
                    /* poolMaxOffset= */ 17,
                    /* pcBias= */ 0,
                    /* alignFillInst= */ 0x11110000,
                    /* nopFillInst= */ 0xaaaa0000,
                    /* nopFill= */ 0);

  // Insert short-range branch. Its deadline is BranchRange bytes after the
  // branch instruction itself.
  BufferOffset br1 = ab.putInt(0xb1bb00cc);
  ab.registerBranchDeadline(
      1, BufferOffset(br1.getOffset() + TestAssembler::BranchRange));
  ab.putInt(0x22220001);
  BufferOffset off = ab.putInt(0x22220002);
  ab.registerBranchDeadline(
      1, BufferOffset(off.getOffset() + TestAssembler::BranchRange));
  ab.putInt(0x22220003);
  ab.putInt(0x22220004);

  // Second short-range branch that will be swept up by hysteresis: once the
  // buffer decides to emit veneers, it emits them for all nearby deadlines.
  BufferOffset br2 = ab.putInt(0xb1bb0d2d);
  ab.registerBranchDeadline(
      1, BufferOffset(br2.getOffset() + TestAssembler::BranchRange));

  // Branch should not have been patched yet here.
  CHECK_EQUAL(*ab.getInst(br1), 0xb1bb00cc);
  CHECK_EQUAL(*ab.getInst(br2), 0xb1bb0d2d);

  // Cancel one of the pending branches.
  // This is what will happen to most branches as they are bound before
  // expiring by Assembler::bind().
  ab.unregisterBranchDeadline(
      1, BufferOffset(off.getOffset() + TestAssembler::BranchRange));

  off = ab.putInt(0x22220006);
  // Here we may or may not have patched the branch yet, but it is inevitable
  // now:
  //
  //  0: br1 pc+36
  //  4: 0x22220001
  //  8: 0x22220002 (unpatched)
  // 12: 0x22220003
  // 16: 0x22220004
  // 20: br2 pc+20
  // 24: 0x22220006
  CHECK_EQUAL(off.getOffset(), 24);
  // 28: guard branch pc+16
  // 32: pool header
  // 36: veneer1
  // 40: veneer2
  // 44: 0x22220007
  off = ab.putInt(0x22220007);
  CHECK_EQUAL(off.getOffset(), 44);

  // Now the branch must have been patched.
  CHECK_EQUAL(*ab.getInst(br1), 0xb3bb0000 + 36);  // br1 pc+36 (patched)
  CHECK_EQUAL(*ab.getInst(BufferOffset(8)),
              0x22220002u);                        // 0x22220002 (unpatched)
  CHECK_EQUAL(*ab.getInst(br2), 0xb3bb0000 + 20);  // br2 pc+20 (patched)
  CHECK_EQUAL(*ab.getInst(BufferOffset(28)), 0xb0bb0010u);  // br pc+16 (guard)
  CHECK_EQUAL(*ab.getInst(BufferOffset(32)),
              0xffff0000u);  // pool header 0 bytes.
  CHECK_EQUAL(*ab.getInst(BufferOffset(36)),
              0xb2bb00ccu);  // veneer1 w/ original 'cc' offset.
  CHECK_EQUAL(*ab.getInst(BufferOffset(40)),
              0xb2bb0d2du);  // veneer2 w/ original 'd2d' offset.
  CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 0x22220007u);

  return true;
}
END_TEST(testAssemblerBuffer_AssemblerBufferWithConstantPools_ShortBranch)
    511 
    512 // Test that everything is put together correctly in the ARM64 assembler.
    513 #if defined(JS_CODEGEN_ARM64)
    514 
    515 #  include "jit/MacroAssembler-inl.h"
    516 
BEGIN_TEST(testAssemblerBuffer_ARM64) {
  using namespace js::jit;

  js::LifoAlloc lifo(4096, js::MallocArena);
  TempAllocator alloc(&lifo);
  JitContext jc(cx);
  StackMacroAssembler masm(cx, alloc);
  AutoCreatedBy acb(masm, __func__);

  // Branches to an unbound label: binding must fix up the pending uses.
  Label lab1;
  masm.branch(Assembler::Equal, &lab1);
  masm.branch(Assembler::LessThan, &lab1);
  masm.bind(&lab1);
  masm.branch(Assembler::Equal, &lab1);

  // ImmCondBranch counts instructions, not bytes: the first branch skips 2
  // instructions, the second 1, and the self-branch encodes 0.
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(0))->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(2) | vixl::eq);
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(4))->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(1) | vixl::lt);
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(8))->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(0) | vixl::eq);

  // Branches can reach the label, but the linked list of uses needs to be
  // rearranged. The final conditional branch cannot reach the first branch.
  Label lab2a;
  Label lab2b;
  masm.bind(&lab2a);
  masm.B(&lab2b);
  // Generate 1,100,000 bytes of NOPs — beyond conditional-branch range but
  // within unconditional-branch range.
  for (unsigned n = 0; n < 1100000; n += 4) {
    masm.Nop();
  }
  masm.branch(Assembler::LessThan, &lab2b);
  masm.bind(&lab2b);
  CHECK_EQUAL(
      masm.getInstructionAt(BufferOffset(lab2a.offset()))->InstructionBits(),
      vixl::B | vixl::Assembler::ImmUncondBranch(1100000 / 4 + 2));
  CHECK_EQUAL(masm.getInstructionAt(BufferOffset(lab2b.offset() - 4))
                  ->InstructionBits(),
              vixl::B_cond | vixl::Assembler::ImmCondBranch(1) | vixl::lt);

  // Generate a conditional branch that can't reach its label, forcing the
  // assembler to emit a veneer it can bounce through.
  Label lab3a;
  Label lab3b;
  masm.bind(&lab3a);
  masm.branch(Assembler::LessThan, &lab3b);
  for (unsigned n = 0; n < 1100000; n += 4) {
    masm.Nop();
  }
  masm.bind(&lab3b);
  masm.B(&lab3a);
  // Follow the chain: conditional branch -> veneer -> final target, and
  // check the hops add up to the full lab3a..lab3b distance.
  Instruction* bcond3 = masm.getInstructionAt(BufferOffset(lab3a.offset()));
  CHECK_EQUAL(bcond3->BranchType(), vixl::CondBranchType);
  ptrdiff_t delta = bcond3->ImmPCRawOffset() * 4;
  Instruction* veneer =
      masm.getInstructionAt(BufferOffset(lab3a.offset() + delta));
  CHECK_EQUAL(veneer->BranchType(), vixl::UncondBranchType);
  delta += veneer->ImmPCRawOffset() * 4;
  CHECK_EQUAL(delta, lab3b.offset() - lab3a.offset());
  Instruction* b3 = masm.getInstructionAt(BufferOffset(lab3b.offset()));
  CHECK_EQUAL(b3->BranchType(), vixl::UncondBranchType);
  CHECK_EQUAL(4 * b3->ImmPCRawOffset(), -delta);

  return true;
}
END_TEST(testAssemblerBuffer_ARM64)
    584 #endif /* JS_CODEGEN_ARM64 */