tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MacroAssembler-arm-inl.h (89294B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef jit_arm_MacroAssembler_arm_inl_h
      8 #define jit_arm_MacroAssembler_arm_inl_h
      9 
     10 #include "jit/arm/MacroAssembler-arm.h"
     11 
     12 namespace js {
     13 namespace jit {
     14 
     15 //{{{ check_macroassembler_style
     16 
// Copy a 64-bit value held in a low/high register pair, word by word.
void MacroAssembler::move64(Register64 src, Register64 dest) {
  move32(src.low, dest.low);
  move32(src.high, dest.high);
}
     21 
// Load a 64-bit immediate into a register pair: low word, then high word.
void MacroAssembler::move64(Imm64 imm, Register64 dest) {
  move32(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
  move32(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
}
     26 
// Transfer a float16 bit pattern from a VFP register to a GPR, then
// zero-extend so only the low 16 bits survive in |dest|.
void MacroAssembler::moveFloat16ToGPR(FloatRegister src, Register dest) {
  ma_vxfer(src, dest);

  // Ensure the hi-word is zeroed (UXTH).
  as_uxth(dest, dest, 0);
}
     33 
// Transfer a float16 bit pattern from a GPR to a VFP register.
// NOTE(review): the UXTH rewrites |src| in place, clobbering its upper
// 16 bits — presumably callers treat |src| as dead afterwards; confirm.
void MacroAssembler::moveGPRToFloat16(Register src, FloatRegister dest) {
  // Ensure the hi-word is zeroed.
  as_uxth(src, src, 0);

  ma_vxfer(src, dest);
}
     40 
// Raw 32-bit transfer from a VFP single register to a GPR.
void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
  ma_vxfer(src, dest);
}
     44 
// Raw 32-bit transfer from a GPR to a VFP single register.
void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
  ma_vxfer(src, dest);
}
     48 
// Zero-extend the low byte of |src| into |dest| (UXTB).
void MacroAssembler::move8ZeroExtend(Register src, Register dest) {
  as_uxtb(dest, src, 0);
}
     52 
// Sign-extend the low byte of |src| into |dest| (SXTB).
void MacroAssembler::move8SignExtend(Register src, Register dest) {
  as_sxtb(dest, src, 0);
}
     56 
// Sign-extend the low halfword of |src| into |dest| (SXTH).
void MacroAssembler::move16SignExtend(Register src, Register dest) {
  as_sxth(dest, src, 0);
}
     60 
// Move a double's 64-bit pattern into a GPR pair (low word, high word).
void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
  ma_vxfer(src, dest.low, dest.high);
}
     64 
// Move a GPR pair's 64-bit pattern into a double register.
void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
  ma_vxfer(src.low, src.high, dest);
}
     68 
// Transfer only the low 32 bits of a double; InvalidReg discards the high
// half.
void MacroAssembler::moveLowDoubleToGPR(FloatRegister src, Register dest) {
  ma_vxfer(src, dest, InvalidReg);
}
     72 
// Truncate a 64-bit pair to its low word; elide the move when it is a no-op.
void MacroAssembler::move64To32(Register64 src, Register dest) {
  if (src.low != dest) {
    move32(src.low, dest);
  }
}
     78 
// Zero-extend a 32-bit value into a 64-bit pair: copy the low word (unless
// already in place) and clear the high word.
void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
  if (src != dest.low) {
    move32(src, dest.low);
  }
  move32(Imm32(0), dest.high);
}
     85 
// Sign-extend a byte to 64 bits: SXTB into the low word, then replicate the
// sign bit into the high word with an arithmetic shift by 31.
void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
  as_sxtb(dest.low, src, 0);
  ma_asr(Imm32(31), dest.low, dest.high);
}
     90 
// Sign-extend a halfword to 64 bits: SXTH, then fill the high word with the
// sign bit.
void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
  as_sxth(dest.low, src, 0);
  ma_asr(Imm32(31), dest.low, dest.high);
}
     95 
// Sign-extend a word to 64 bits: copy the low word (unless already in
// place), then fill the high word with the sign bit.
void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
  if (src != dest.low) {
    move32(src, dest.low);
  }
  ma_asr(Imm32(31), dest.low, dest.high);
}
    102 
// Pointers are 32 bits on ARM32, so this is the plain 8->32 sign-extend.
void MacroAssembler::move8SignExtendToPtr(Register src, Register dest) {
  move8SignExtend(src, dest);
}
    106 
// Pointers are 32 bits on ARM32, so this is the plain 16->32 sign-extend.
void MacroAssembler::move16SignExtendToPtr(Register src, Register dest) {
  move16SignExtend(src, dest);
}
    110 
// 32-bit word to 32-bit pointer: a straight register move.
void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
  move32(src, dest);
}
    114 
// 32-bit word to 32-bit pointer: a straight register move.
void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
  move32(src, dest);
}
    118 
    119 // ===============================================================
    120 // Load instructions
    121 
// Pointer width equals word width here, so a plain 32-bit load suffices.
void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
  load32(src, dest);
}
    125 
// The ABI return address lives in the link register (lr).
void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(lr, dest); }
    127 
    128 // ===============================================================
    129 // Logical instructions
    130 
// Bitwise NOT of |reg| in place (MVN).
void MacroAssembler::not32(Register reg) { ma_mvn(reg, reg); }
    132 
// Pointer-width NOT is identical to not32 on ARM32 (MVN).
void MacroAssembler::notPtr(Register reg) { ma_mvn(reg, reg); }
    134 
// dest &= src; SetCC updates the condition flags.
void MacroAssembler::and32(Register src, Register dest) {
  ma_and(src, dest, SetCC);
}
    138 
// dest &= imm (delegates to the three-operand form).
void MacroAssembler::and32(Imm32 imm, Register dest) { and32(imm, dest, dest); }
    140 
// dest = src & imm; a scratch register may be needed to materialize imm.
void MacroAssembler::and32(Imm32 imm, Register src, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_and(imm, src, dest, scratch, SetCC);
}
    145 
// *dest &= imm via load / AND / store (not atomic).
void MacroAssembler::and32(Imm32 imm, const Address& dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(dest, scratch, scratch2);
  ma_and(imm, scratch, scratch2);
  ma_str(scratch, dest, scratch2);
}
    154 
// dest &= *src; SetCC updates the condition flags.
void MacroAssembler::and32(const Address& src, Register dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(src, scratch, scratch2);
  ma_and(scratch, dest, SetCC);
}
    162 
// Pointer-width AND; unlike and32 this leaves the flags alone.
void MacroAssembler::andPtr(Register src, Register dest) { ma_and(src, dest); }
    164 
// dest &= imm (delegates to the three-operand form).
void MacroAssembler::andPtr(Imm32 imm, Register dest) {
  andPtr(imm, dest, dest);
}
    168 
// dest = src & imm, flags untouched.
void MacroAssembler::andPtr(Imm32 imm, Register src, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_and(imm, src, dest, scratch);
}
    173 
// 64-bit AND with an immediate, one word at a time; a word whose mask is
// all-ones is skipped because AND with ~0 is a no-op.
void MacroAssembler::and64(Imm64 imm, Register64 dest) {
  if (imm.low().value != int32_t(0xFFFFFFFF)) {
    and32(imm.low(), dest.low);
  }
  if (imm.hi().value != int32_t(0xFFFFFFFF)) {
    and32(imm.hi(), dest.high);
  }
}
    182 
// 64-bit OR with an immediate; a zero word is skipped (OR with 0 is a no-op).
void MacroAssembler::or64(Imm64 imm, Register64 dest) {
  if (imm.low().value) {
    or32(imm.low(), dest.low);
  }
  if (imm.hi().value) {
    or32(imm.hi(), dest.high);
  }
}
    191 
// 64-bit XOR with an immediate; a zero word is skipped (XOR with 0 is a
// no-op).
void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
  if (imm.low().value) {
    xor32(imm.low(), dest.low);
  }
  if (imm.hi().value) {
    xor32(imm.hi(), dest.high);
  }
}
    200 
// dest |= src (ORR).
void MacroAssembler::or32(Register src, Register dest) { ma_orr(src, dest); }
    202 
// dest |= imm (delegates to the three-operand form).
void MacroAssembler::or32(Imm32 imm, Register dest) { or32(imm, dest, dest); }
    204 
// dest = src | imm; scratch may be needed to materialize imm.
void MacroAssembler::or32(Imm32 imm, Register src, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_orr(imm, src, dest, scratch);
}
    209 
// *dest |= imm via load / ORR / store (not atomic).
void MacroAssembler::or32(Imm32 imm, const Address& dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(dest, scratch, scratch2);
  ma_orr(imm, scratch, scratch2);
  ma_str(scratch, dest, scratch2);
}
    218 
// Pointer-width OR is identical to or32 on ARM32.
void MacroAssembler::orPtr(Register src, Register dest) { ma_orr(src, dest); }
    220 
// dest |= imm (delegates to the three-operand form).
void MacroAssembler::orPtr(Imm32 imm, Register dest) { orPtr(imm, dest, dest); }
    222 
// dest = src | imm for pointer-width values.
void MacroAssembler::orPtr(Imm32 imm, Register src, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_orr(imm, src, dest, scratch);
}
    227 
// 64-bit AND of register pairs, word by word.
void MacroAssembler::and64(Register64 src, Register64 dest) {
  and32(src.low, dest.low);
  and32(src.high, dest.high);
}
    232 
// 64-bit OR of register pairs, word by word.
void MacroAssembler::or64(Register64 src, Register64 dest) {
  or32(src.low, dest.low);
  or32(src.high, dest.high);
}
    237 
// 64-bit XOR of register pairs, word by word (EOR).
void MacroAssembler::xor64(Register64 src, Register64 dest) {
  ma_eor(src.low, dest.low);
  ma_eor(src.high, dest.high);
}
    242 
// dest ^= src; SetCC updates the condition flags.
void MacroAssembler::xor32(Register src, Register dest) {
  ma_eor(src, dest, SetCC);
}
    246 
// dest ^= imm (delegates to the three-operand form).
void MacroAssembler::xor32(Imm32 imm, Register dest) { xor32(imm, dest, dest); }
    248 
// dest = src ^ imm; scratch may be needed to materialize imm.
void MacroAssembler::xor32(Imm32 imm, Register src, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_eor(imm, src, dest, scratch, SetCC);
}
    253 
// *dest ^= imm via load / EOR / store (not atomic).
void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(dest, scratch, scratch2);
  ma_eor(imm, scratch, scratch2);
  ma_str(scratch, dest, scratch2);
}
    262 
// dest ^= *src; SetCC updates the condition flags.
void MacroAssembler::xor32(const Address& src, Register dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(src, scratch, scratch2);
  ma_eor(scratch, dest, SetCC);
}
    270 
// Pointer-width XOR; flags untouched.
void MacroAssembler::xorPtr(Register src, Register dest) { ma_eor(src, dest); }
    272 
// dest ^= imm (delegates to the three-operand form).
void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
  xorPtr(imm, dest, dest);
}
    276 
// dest = src ^ imm for pointer-width values.
void MacroAssembler::xorPtr(Imm32 imm, Register src, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_eor(imm, src, dest, scratch);
}
    281 
    282 // ===============================================================
    283 // Swap instructions
    284 
// REVSH: byte-swap the low halfword and sign-extend the result.
void MacroAssembler::byteSwap16SignExtend(Register reg) { as_revsh(reg, reg); }
    286 
// REV16 swaps the bytes within each halfword; UXTH then clears the upper
// halfword so the result is zero-extended.
void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
  as_rev16(reg, reg);
  as_uxth(reg, reg, 0);
}
    291 
// REV: reverse all four bytes of the word.
void MacroAssembler::byteSwap32(Register reg) { as_rev(reg, reg); }
    293 
// 64-bit byte swap: reverse the bytes within each word, then exchange the
// two words through a scratch register.
void MacroAssembler::byteSwap64(Register64 reg) {
  as_rev(reg.high, reg.high);
  as_rev(reg.low, reg.low);

  ScratchRegisterScope scratch(*this);
  ma_mov(reg.high, scratch);
  ma_mov(reg.low, reg.high);
  ma_mov(scratch, reg.low);
}
    303 
    304 // ===============================================================
    305 // Arithmetic functions
    306 
// dest += src; SetCC updates the condition flags.
void MacroAssembler::add32(Register src, Register dest) {
  ma_add(src, dest, SetCC);
}
    310 
// dest += imm; scratch may be needed to materialize imm; flags set.
void MacroAssembler::add32(Imm32 imm, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_add(imm, dest, scratch, SetCC);
}
    315 
// dest = src + imm; flags set.
void MacroAssembler::add32(Imm32 imm, Register src, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_add(src, imm, dest, scratch, SetCC);
}
    320 
// *dest += imm via load / ADD / store (not atomic).
void MacroAssembler::add32(Imm32 imm, const Address& dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(dest, scratch, scratch2);
  ma_add(imm, scratch, scratch2, SetCC);
  ma_str(scratch, dest, scratch2);
}
    329 
// dest += *src; flags set.
void MacroAssembler::add32(const Address& src, Register dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(src, scratch, scratch2);
  ma_add(scratch, dest, SetCC);
}
    337 
// Pointer-width add; unlike add32 this leaves the flags alone.
void MacroAssembler::addPtr(Register src, Register dest) { ma_add(src, dest); }
    339 
// dest += imm for pointer-width values; flags untouched.
void MacroAssembler::addPtr(Imm32 imm, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_add(imm, dest, scratch);
}
    344 
// ImmWord fits in 32 bits on ARM32, so delegate to the Imm32 form.
void MacroAssembler::addPtr(ImmWord imm, Register dest) {
  addPtr(Imm32(imm.value), dest);
}
    348 
// *dest += imm via load / ADD / store (not atomic).
void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(dest, scratch, scratch2);
  ma_add(imm, scratch, scratch2);
  ma_str(scratch, dest, scratch2);
}
    357 
// dest += *src; flags set.
void MacroAssembler::addPtr(const Address& src, Register dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(src, scratch, scratch2);
  ma_add(scratch, dest, SetCC);
}
    365 
// 64-bit add: ADDS on the low words produces the carry consumed by the ADC
// on the high words.
void MacroAssembler::add64(Register64 src, Register64 dest) {
  ma_add(src.low, dest.low, SetCC);
  ma_adc(src.high, dest.high);
}
    370 
// 64-bit add of a 32-bit immediate: add into the low word with carry out,
// then propagate the carry into the high word with ADC #0.
void MacroAssembler::add64(Imm32 imm, Register64 dest) {
  ScratchRegisterScope scratch(*this);
  ma_add(imm, dest.low, scratch, SetCC);
  as_adc(dest.high, dest.high, Imm8(0), LeaveCC);
}
    376 
// 64-bit add of a 64-bit immediate: low words with carry out, then the high
// words with carry in.
void MacroAssembler::add64(Imm64 imm, Register64 dest) {
  ScratchRegisterScope scratch(*this);
  ma_add(imm.low(), dest.low, scratch, SetCC);
  ma_adc(imm.hi(), dest.high, scratch, LeaveCC);
}
    382 
// Emit a patchable immediate load (placeholder 0) followed by
// dest = sp - imm.  Returns the offset of the patchable mov so that
// patchSub32FromStackPtr can later install the real immediate.
CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
  ScratchRegisterScope scratch(*this);
  CodeOffset offs = CodeOffset(currentOffset());
  ma_movPatchable(Imm32(0), scratch, Always);
  ma_sub(getStackPointer(), scratch, dest);
  return offs;
}
    390 
// Patch the immediate emitted by sub32FromStackPtrWithPatch.  The encoding
// is MOVW/MOVT when the CPU supports it, otherwise a literal-pool LDR.
void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
  ScratchRegisterScope scratch(*this);
  BufferInstructionIterator iter(BufferOffset(offset.offset()), &m_buffer);
  iter.maybeSkipAutomaticInstructions();
  ma_mov_patch(imm, scratch, Always, ARMFlags::HasMOVWT() ? L_MOVWT : L_LDR,
               iter);
}
    398 
// dest += src (VFP double add).
void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
  ma_vadd(dest, src, dest);
}
    402 
// dest += src (VFP single add).
void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
  ma_vadd_f32(dest, src, dest);
}
    406 
// dest -= src; SetCC updates the condition flags.
void MacroAssembler::sub32(Register src, Register dest) {
  ma_sub(src, dest, SetCC);
}
    410 
// dest -= imm; scratch may be needed to materialize imm; flags set.
void MacroAssembler::sub32(Imm32 imm, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_sub(imm, dest, scratch, SetCC);
}
    415 
// dest -= *src; flags set.
void MacroAssembler::sub32(const Address& src, Register dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(src, scratch, scratch2);
  ma_sub(scratch, dest, SetCC);
}
    423 
// Pointer-width subtract; flags untouched.
void MacroAssembler::subPtr(Register src, Register dest) { ma_sub(src, dest); }
    425 
// *dest -= src via load / SUB / store (not atomic).
void MacroAssembler::subPtr(Register src, const Address& dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(dest, scratch, scratch2);
  ma_sub(src, scratch);
  ma_str(scratch, dest, scratch2);
}
    434 
// dest -= imm for pointer-width values; flags untouched.
void MacroAssembler::subPtr(Imm32 imm, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_sub(imm, dest, scratch);
}
    439 
// dest -= *addr; flags untouched.
void MacroAssembler::subPtr(const Address& addr, Register dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  ma_ldr(addr, scratch, scratch2);
  ma_sub(scratch, dest);
}
    447 
// 64-bit subtract: SUBS on the low words produces the borrow consumed by
// SBC on the high words.
void MacroAssembler::sub64(Register64 src, Register64 dest) {
  ma_sub(src.low, dest.low, SetCC);
  ma_sbc(src.high, dest.high, LeaveCC);
}
    452 
// 64-bit subtract of an immediate: low words with borrow out, high words
// with borrow in.
void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
  ScratchRegisterScope scratch(*this);
  ma_sub(imm.low(), dest.low, scratch, SetCC);
  ma_sbc(imm.hi(), dest.high, scratch, LeaveCC);
}
    458 
// dest -= src (VFP double subtract).
void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
  ma_vsub(dest, src, dest);
}
    462 
// dest -= src (VFP single subtract).
void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
  ma_vsub_f32(dest, src, dest);
}
    466 
// srcDest *= rhs (32-bit MUL; only the low 32 bits of the product survive).
void MacroAssembler::mul32(Register rhs, Register srcDest) {
  as_mul(srcDest, srcDest, rhs);
}
    470 
// srcDest *= imm: materialize the immediate in a scratch register first,
// since ARM MUL has no immediate form.
void MacroAssembler::mul32(Imm32 imm, Register srcDest) {
  ScratchRegisterScope scratch(*this);
  move32(imm, scratch);
  mul32(scratch, srcDest);
}
    476 
// dest = high 32 bits of the unsigned 64-bit product src * imm (UMULL); the
// low half of the product is discarded into the scratch register.
void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
  ScratchRegisterScope scratch(*this);
  ma_umull(src, imm, dest, scratch, scratch);
}
    481 
// Pointer-width multiply is a plain 32-bit MUL on ARM32.
void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
  as_mul(srcDest, srcDest, rhs);
}
    485 
// srcDest *= rhs: materialize the immediate first, then multiply.
void MacroAssembler::mulPtr(ImmWord rhs, Register srcDest) {
  ScratchRegisterScope scratch(*this);
  movePtr(rhs, scratch);
  mulPtr(scratch, srcDest);
}
    491 
// dest *= imm (64-bit schoolbook multiply on 32-bit halves).
// NOTE: only supports an immediate whose HIGH word is exactly 5 — the
// (x*5 == x + x<<2) strength-reduction below — and crashes on anything
// else.  Callers must only pass such constants.
void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
  // LOW32  = LOW(LOW(dest) * LOW(imm));
  // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
  //        + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
  //        + HIGH(LOW(dest) * LOW(imm)) [carry]

  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);

  // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
  ma_mov(Imm32(imm.value & 0xFFFFFFFFL), scratch);
  as_mul(dest.high, dest.high, scratch);

  // high:low = LOW(dest) * LOW(imm);
  as_umull(scratch2, scratch, dest.low, scratch);

  // HIGH(dest) += high;
  as_add(dest.high, dest.high, O2Reg(scratch2));

  // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
  if (((imm.value >> 32) & 0xFFFFFFFFL) == 5) {
    // x * 5 == x + (x << 2), avoiding a second multiply.
    as_add(scratch2, dest.low, lsl(dest.low, 2));
  } else {
    MOZ_CRASH("Not supported imm");
  }
  as_add(dest.high, dest.high, O2Reg(scratch2));

  // LOW(dest) = low;
  ma_mov(scratch, dest.low);
}
    522 
// dest *= imm (full 64-bit multiply); |temp| holds the partial sum of the
// cross terms and must not alias either half of dest.
void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
                           const Register temp) {
  // LOW32  = LOW(LOW(dest) * LOW(src));                                  (1)
  // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits]   (2)
  //        + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits]  (3)
  //        + HIGH(LOW(dest) * LOW(src)) [carry]                          (4)

  MOZ_ASSERT(temp != dest.high && temp != dest.low);

  // Compute mul64
  ScratchRegisterScope scratch(*this);
  ma_mul(dest.high, imm.low(), dest.high, scratch);  // (2)
  ma_mul(dest.low, imm.hi(), temp, scratch);         // (3)
  ma_add(dest.high, temp, temp);
  ma_umull(dest.low, imm.low(), dest.high, dest.low, scratch);  // (4) + (1)
  ma_add(temp, dest.high, dest.high);
}
    540 
// dest *= src (full 64-bit multiply); |temp| holds the partial sum of the
// cross terms.  The alias asserts guarantee no operand is overwritten
// before it is consumed.
void MacroAssembler::mul64(const Register64& src, const Register64& dest,
                           const Register temp) {
  // LOW32  = LOW(LOW(dest) * LOW(src));                                  (1)
  // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits]   (2)
  //        + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits]  (3)
  //        + HIGH(LOW(dest) * LOW(src)) [carry]                          (4)

  MOZ_ASSERT(dest != src);
  MOZ_ASSERT(dest.low != src.high && dest.high != src.low);

  // Compute mul64
  ma_mul(dest.high, src.low, dest.high);  // (2)
  ma_mul(src.high, dest.low, temp);       // (3)
  ma_add(dest.high, temp, temp);
  ma_umull(dest.low, src.low, dest.high, dest.low);  // (4) + (1)
  ma_add(temp, dest.high, dest.high);
}
    558 
// dest = src * 3, computed as src + (src << 1) without a multiply.
void MacroAssembler::mulBy3(Register src, Register dest) {
  as_add(dest, src, lsl(src, 1));
}
    562 
// dest *= src (VFP single multiply).
void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
  ma_vmul_f32(dest, src, dest);
}
    566 
// dest *= src (VFP double multiply).
void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
  ma_vmul(dest, src, dest);
}
    570 
// dest *= *imm: load the double stored at absolute address |imm| into a
// scratch double register, then multiply.  |temp| is unused here; the
// scratch register scopes supply the temporaries.
void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
                                  FloatRegister dest) {
  ScratchRegisterScope scratch(*this);
  ScratchDoubleScope scratchDouble(*this);

  movePtr(imm, scratch);
  ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), scratchDouble);
  mulDouble(scratchDouble, dest);
}
    580 
// dest = lhs / rhs using the hardware divider (UDIV/SDIV); callers must
// first check ARMFlags::HasIDIV(), enforced by the assert.
void MacroAssembler::quotient32(Register lhs, Register rhs, Register dest,
                                bool isUnsigned) {
  MOZ_ASSERT(ARMFlags::HasIDIV());
  if (isUnsigned) {
    ma_udiv(lhs, rhs, dest);
  } else {
    ma_sdiv(lhs, rhs, dest);
  }
}
    590 
// dest = lhs % rhs; requires the hardware divider, with the remainder
// recovered via a scratch register in ma_umod/ma_smod.
void MacroAssembler::remainder32(Register lhs, Register rhs, Register dest,
                                 bool isUnsigned) {
  MOZ_ASSERT(ARMFlags::HasIDIV());

  ScratchRegisterScope scratch(*this);
  if (isUnsigned) {
    ma_umod(lhs, rhs, dest, scratch);
  } else {
    ma_smod(lhs, rhs, dest, scratch);
  }
}
    602 
// dest /= src (VFP single divide).
void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
  ma_vdiv_f32(dest, src, dest);
}
    606 
// dest /= src (VFP double divide).
void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
  ma_vdiv(dest, src, dest);
}
    610 
// Increment the 64-bit counter at absolute address |dest|.  r0/r1 are
// spilled to the stack (pre-indexed STRD) so they can serve as the working
// pair, then restored (post-indexed LDRD).  Not atomic.
void MacroAssembler::inc64(AbsoluteAddress dest) {
  ScratchRegisterScope scratch(*this);

  // Push r0/r1, decrementing sp by 8.
  ma_strd(r0, r1, EDtrAddr(sp, EDtrOffImm(-8)), PreIndex);

  ma_mov(Imm32((int32_t)dest.addr), scratch);
  ma_ldrd(EDtrAddr(scratch, EDtrOffImm(0)), r0, r1);

  // 64-bit increment: ADDS #1 sets the carry consumed by ADC #0.
  as_add(r0, r0, Imm8(1), SetCC);
  as_adc(r1, r1, Imm8(0), LeaveCC);

  ma_strd(r0, r1, EDtrAddr(scratch, EDtrOffImm(0)));
  // Pop r0/r1, restoring sp.
  ma_ldrd(EDtrAddr(sp, EDtrOffImm(8)), r0, r1, PostIndex);
}
    625 
// reg = -reg; SetCC updates the condition flags.
void MacroAssembler::neg32(Register reg) { ma_neg(reg, reg, SetCC); }
    627 
// 64-bit negate: RSBS 0 - low produces the borrow consumed by RSC on the
// high word.
void MacroAssembler::neg64(Register64 reg) {
  as_rsb(reg.low, reg.low, Imm8(0), SetCC);
  as_rsc(reg.high, reg.high, Imm8(0));
}
    632 
// Pointer-width negate is identical to neg32 on ARM32.
void MacroAssembler::negPtr(Register reg) { neg32(reg); }
    634 
// reg = -reg (VFP double negate).
void MacroAssembler::negateDouble(FloatRegister reg) { ma_vneg(reg, reg); }
    636 
// reg = -reg (VFP single negate).
void MacroAssembler::negateFloat(FloatRegister reg) { ma_vneg_f32(reg, reg); }
    638 
// dest = |src| via compare-and-conditional-select: RSB (0 - src) when src
// is negative, otherwise a plain conditional move (skipped when src and
// dest already alias).
void MacroAssembler::abs32(Register src, Register dest) {
  as_cmp(src, Imm8(0));
  as_rsb(dest, src, Imm8(0), LeaveCC, LessThan);
  if (dest != src) {
    as_mov(dest, O2Reg(src), LeaveCC, GreaterThanOrEqual);
  }
}
    646 
// dest = |src| (VFP single absolute value).
void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
  ma_vabs_f32(src, dest);
}
    650 
// dest = |src| (VFP double absolute value).
void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
  ma_vabs(src, dest);
}
    654 
// dest = sqrt(src) (VFP single square root).
void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
  ma_vsqrt_f32(src, dest);
}
    658 
// dest = sqrt(src) (VFP double square root).
void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
  ma_vsqrt(src, dest);
}
    662 
// dest = min(lhs, rhs) for 32-bit values.
void MacroAssembler::min32(Register lhs, Register rhs, Register dest) {
  minMax32(lhs, rhs, dest, /* isMax = */ false);
}
    666 
// dest = min(lhs, rhs) with an immediate rhs.
void MacroAssembler::min32(Register lhs, Imm32 rhs, Register dest) {
  minMax32(lhs, rhs, dest, /* isMax = */ false);
}
    670 
// dest = max(lhs, rhs) for 32-bit values.
void MacroAssembler::max32(Register lhs, Register rhs, Register dest) {
  minMax32(lhs, rhs, dest, /* isMax = */ true);
}
    674 
// dest = max(lhs, rhs) with an immediate rhs.
void MacroAssembler::max32(Register lhs, Imm32 rhs, Register dest) {
  minMax32(lhs, rhs, dest, /* isMax = */ true);
}
    678 
// Pointer-width min is the 32-bit min on ARM32.
void MacroAssembler::minPtr(Register lhs, Register rhs, Register dest) {
  minMax32(lhs, rhs, dest, /* isMax = */ false);
}
    682 
// Pointer-width min with an immediate; ImmWord fits in 32 bits here.
void MacroAssembler::minPtr(Register lhs, ImmWord rhs, Register dest) {
  minMax32(lhs, Imm32(rhs.value), dest, /* isMax = */ false);
}
    686 
// Pointer-width max is the 32-bit max on ARM32.
void MacroAssembler::maxPtr(Register lhs, Register rhs, Register dest) {
  minMax32(lhs, rhs, dest, /* isMax = */ true);
}
    690 
// Pointer-width max with an immediate; ImmWord fits in 32 bits here.
void MacroAssembler::maxPtr(Register lhs, ImmWord rhs, Register dest) {
  minMax32(lhs, Imm32(rhs.value), dest, /* isMax = */ true);
}
    694 
// srcDest = min(srcDest, other); handleNaN selects NaN-propagating
// semantics in the shared minMaxFloat32 helper.
void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
                                bool handleNaN) {
  minMaxFloat32(srcDest, other, handleNaN, false);
}
    699 
// srcDest = min(srcDest, other) for doubles; see minMaxDouble for the
// handleNaN semantics.
void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
                               bool handleNaN) {
  minMaxDouble(srcDest, other, handleNaN, false);
}
    704 
// srcDest = max(srcDest, other) for singles; see minMaxFloat32 for the
// handleNaN semantics.
void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
                                bool handleNaN) {
  minMaxFloat32(srcDest, other, handleNaN, true);
}
    709 
// srcDest = max(srcDest, other) for doubles; see minMaxDouble for the
// handleNaN semantics.
void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
                               bool handleNaN) {
  minMaxDouble(srcDest, other, handleNaN, true);
}
    714 
    715 // ===============================================================
    716 // Shift functions
    717 
// dest <<= imm (delegates to the three-operand form).
void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
  lshiftPtr(imm, dest, dest);
}
    721 
// dest = src << imm; the shift amount must already be in [0, 32).
void MacroAssembler::lshiftPtr(Imm32 imm, Register src, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 32);
  ma_lsl(imm, src, dest);
}
    726 
// dest <<= src (register-specified shift amount, unmasked).
void MacroAssembler::lshiftPtr(Register src, Register dest) {
  ma_lsl(src, dest, dest);
}
    730 
// Pointer-width left shift with a masked (mod-32) shift amount.
void MacroAssembler::flexibleLshiftPtr(Register shift, Register srcDest) {
  flexibleLshift32(shift, srcDest);
}
    734 
// dest <<= imm for a 64-bit register pair.  Shifts below 32 move bits from
// the low word into the high word; shifts of 32 or more move the low word
// wholesale and zero the low word.  A zero shift emits nothing.
void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  if (imm.value == 0) {
    return;
  }

  if (imm.value < 32) {
    as_mov(dest.high, lsl(dest.high, imm.value));
    // Bits shifted out of the low word land in the high word.
    as_orr(dest.high, dest.high, lsr(dest.low, 32 - imm.value));
    as_mov(dest.low, lsl(dest.low, imm.value));
  } else {
    as_mov(dest.high, lsl(dest.low, imm.value - 32));
    ma_mov(Imm32(0), dest.low);
  }
}
    750 
// dest <<= (unmaskedShift & 0x3f) for a 64-bit register pair.
//
//   dest.high = dest.high << shift
//             | dest.low  << (shift - 32)
//             | dest.low  >> (32 - shift)
//
// Note: one of the two dest.low shifts always yields zero, because one of
// the two computed shift amounts is negative.
void MacroAssembler::lshift64(Register unmaskedShift, Register64 dest) {
  ScratchRegisterScope shift(*this);
  as_and(shift, unmaskedShift, Imm8(0x3f));
  as_mov(dest.high, lsl(dest.high, shift));
  as_sub(shift, shift, Imm8(32));
  as_orr(dest.high, dest.high, lsl(dest.low, shift));
  ma_neg(shift, shift);
  as_orr(dest.high, dest.high, lsr(dest.low, shift));
  // Re-derive the masked shift (the scratch was consumed above).
  as_and(shift, unmaskedShift, Imm8(0x3f));
  as_mov(dest.low, lsl(dest.low, shift));
}
    766 
// dest <<= src (register-specified shift amount, unmasked).
void MacroAssembler::lshift32(Register src, Register dest) {
  ma_lsl(src, dest, dest);
}
    770 
// dest <<= (src & 0x1f): mask the shift amount to 0-31 first, matching
// JS/wasm shift semantics.
void MacroAssembler::flexibleLshift32(Register src, Register dest) {
  ScratchRegisterScope scratch(*this);
  as_and(scratch, src, Imm8(0x1F));
  lshift32(scratch, dest);
}
    776 
// dest <<= imm; same operation at 32-bit and pointer width on ARM32.
void MacroAssembler::lshift32(Imm32 imm, Register dest) {
  lshiftPtr(imm, dest, dest);
}
    780 
// dest = src << imm; same operation at 32-bit and pointer width on ARM32.
void MacroAssembler::lshift32(Imm32 imm, Register src, Register dest) {
  lshiftPtr(imm, src, dest);
}
    784 
// dest >>>= imm (logical; delegates to the three-operand form).
void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
  rshiftPtr(imm, dest, dest);
}
    788 
// dest = src >>> imm (logical).  A zero shift emits nothing; note that
// when imm is 0 and src != dest, no move is emitted either.
void MacroAssembler::rshiftPtr(Imm32 imm, Register src, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 32);
  if (imm.value) {
    ma_lsr(imm, src, dest);
  }
}
    795 
// dest >>>= src (logical, register-specified amount, unmasked).
void MacroAssembler::rshiftPtr(Register src, Register dest) {
  ma_lsr(src, dest, dest);
}
    799 
// Pointer-width logical right shift with a masked (mod-32) shift amount.
void MacroAssembler::flexibleRshiftPtr(Register shift, Register srcDest) {
  flexibleRshift32(shift, srcDest);
}
    803 
// dest >>>= src (logical, register-specified amount, unmasked).
void MacroAssembler::rshift32(Register src, Register dest) {
  ma_lsr(src, dest, dest);
}
    807 
// dest >>>= (src & 0x1f): mask the shift amount to 0-31 first.
void MacroAssembler::flexibleRshift32(Register src, Register dest) {
  ScratchRegisterScope scratch(*this);
  as_and(scratch, src, Imm8(0x1F));
  rshift32(scratch, dest);
}
    813 
// dest >>>= imm; same operation at 32-bit and pointer width on ARM32.
void MacroAssembler::rshift32(Imm32 imm, Register dest) {
  rshiftPtr(imm, dest, dest);
}
    817 
// dest = src >>> imm; same operation at 32-bit and pointer width on ARM32.
void MacroAssembler::rshift32(Imm32 imm, Register src, Register dest) {
  rshiftPtr(imm, src, dest);
}
    821 
// dest >>= imm (arithmetic; delegates to the three-operand form).
void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
  rshiftPtrArithmetic(imm, dest, dest);
}
    825 
// dest = src >> imm (arithmetic).  A zero shift emits nothing; note that
// when imm is 0 and src != dest, no move is emitted either.
void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register src,
                                         Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 32);
  if (imm.value) {
    ma_asr(imm, src, dest);
  }
}
    833 
// dest >>= src (arithmetic, register-specified amount, unmasked).
void MacroAssembler::rshiftPtrArithmetic(Register src, Register dest) {
  ma_asr(src, dest, dest);
}
    837 
// Pointer-width arithmetic right shift with a masked (mod-32) amount.
void MacroAssembler::flexibleRshiftPtrArithmetic(Register shift,
                                                 Register srcDest) {
  flexibleRshift32Arithmetic(shift, srcDest);
}
    842 
// dest >>= imm (arithmetic) for a 64-bit register pair.  Shifts below 32
// move bits from the high word into the low word; a shift of exactly 32
// copies the high word down; larger shifts shift the high word by the
// remainder.  In the latter two cases the high word becomes all sign bits
// (ASR #31).  A zero shift emits nothing.
void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  if (!imm.value) {
    return;
  }

  if (imm.value < 32) {
    as_mov(dest.low, lsr(dest.low, imm.value));
    // Bits shifted out of the high word land in the low word.
    as_orr(dest.low, dest.low, lsl(dest.high, 32 - imm.value));
    as_mov(dest.high, asr(dest.high, imm.value));
  } else if (imm.value == 32) {
    as_mov(dest.low, O2Reg(dest.high));
    as_mov(dest.high, asr(dest.high, 31));
  } else {
    as_mov(dest.low, asr(dest.high, imm.value - 32));
    as_mov(dest.high, asr(dest.high, 31));
  }
}
    861 
// dest >>= (unmaskedShift & 0x3f) (arithmetic) for a 64-bit register pair,
// with a register-specified shift amount.
void MacroAssembler::rshift64Arithmetic(Register unmaskedShift,
                                        Register64 dest) {
  Label proceed;

  // dest.low = dest.low >>> shift | dest.high <<< 32 - shift
  // if (shift - 32 >= 0)
  //   dest.low |= dest.high >>> shift - 32
  // Note: Negative shifts yield a zero as result, except for the signed
  //       right shift. Therefore we need to test for it and only do it if
  //       it isn't negative.
  ScratchRegisterScope shift(*this);

  as_and(shift, unmaskedShift, Imm8(0x3f));
  as_mov(dest.low, lsr(dest.low, shift));
  as_rsb(shift, shift, Imm8(32));
  as_orr(dest.low, dest.low, lsl(dest.high, shift));
  // shift = shift - 32; skip the ASR merge when that is negative.
  ma_neg(shift, shift, SetCC);
  ma_b(&proceed, Signed);

  as_orr(dest.low, dest.low, asr(dest.high, shift));

  bind(&proceed);
  // Re-derive the masked shift (the scratch was consumed above).
  as_and(shift, unmaskedShift, Imm8(0x3f));
  as_mov(dest.high, asr(dest.high, shift));
}
    887 
// In-place 32-bit arithmetic right shift of |dest| by register |src|.
void MacroAssembler::rshift32Arithmetic(Register src, Register dest) {
 ma_asr(src, dest, dest);
}
    891 
// In-place 32-bit arithmetic right shift by a constant; pointer-width and
// 32-bit shifts coincide on ARM32.
void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
 rshiftPtrArithmetic(imm, dest, dest);
}
    895 
// 32-bit arithmetic right shift of |src| by constant |imm| into |dest|.
void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register src,
                                       Register dest) {
 rshiftPtrArithmetic(imm, src, dest);
}
    900 
// Arithmetic right shift where the shift register may hold any value: mask
// it to 0..31 (matching ARM/JS shift semantics) before shifting.
void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
 ScratchRegisterScope scratch(*this);
 as_and(scratch, src, Imm8(0x1F));
 rshift32Arithmetic(scratch, dest);
}
    906 
    907 void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
    908  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
    909  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
    910  if (!imm.value) {
    911    return;
    912  }
    913 
    914  if (imm.value < 32) {
    915    as_mov(dest.low, lsr(dest.low, imm.value));
    916    as_orr(dest.low, dest.low, lsl(dest.high, 32 - imm.value));
    917    as_mov(dest.high, lsr(dest.high, imm.value));
    918  } else if (imm.value == 32) {
    919    ma_mov(dest.high, dest.low);
    920    ma_mov(Imm32(0), dest.high);
    921  } else {
    922    ma_lsr(Imm32(imm.value - 32), dest.high, dest.low);
    923    ma_mov(Imm32(0), dest.high);
    924  }
    925 }
    926 
// Logical 64-bit right shift of |dest| in place by a variable amount.
// The shift register is masked to 6 bits (0..63) before use.
void MacroAssembler::rshift64(Register unmaskedShift, Register64 dest) {
 // dest.low = dest.low >> shift | dest.high >> shift - 32 | dest.high << 32 -
 // shift Note: one of the two dest.high shifts will always yield zero due to
 // negative shift.

 ScratchRegisterScope shift(*this);
 as_and(shift, unmaskedShift, Imm8(0x3f));
 as_mov(dest.low, lsr(dest.low, shift));
 // shift = shift - 32: contributes high >>> (shift - 32) when shift >= 32.
 as_sub(shift, shift, Imm8(32));
 as_orr(dest.low, dest.low, lsr(dest.high, shift));
 // shift = 32 - shift: contributes high << (32 - shift) when shift < 32.
 ma_neg(shift, shift);
 as_orr(dest.low, dest.low, lsl(dest.high, shift));
 // Re-derive the masked shift (scratch was clobbered) for the high word.
 as_and(shift, unmaskedShift, Imm8(0x3f));
 as_mov(dest.high, lsr(dest.high, shift));
}
    942 
    943 // ===============================================================
    944 // Rotate functions
// Rotate |input| left by constant |count| into |dest|. A zero count degrades
// to a plain move (ma_rol is not emitted for count == 0).
void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
 if (count.value) {
   ma_rol(count, input, dest);
 } else {
   ma_mov(input, dest);
 }
}
    952 
// Rotate |input| left by register |count| into |dest|; needs a scratch
// register because ARM only has a rotate-right instruction.
void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
 ScratchRegisterScope scratch(*this);
 ma_rol(count, input, dest, scratch);
}
    957 
// Rotate the 64-bit value left by a constant amount (masked to 0..63).
// Rotations greater than 32 are handled as the complementary right rotate.
// NOTE(review): the 0 < amount < 32 branch reads from |dest|, not |input| —
// this appears to assume input == dest for that path; confirm with callers.
void MacroAssembler::rotateLeft64(Imm32 count, Register64 input,
                                 Register64 dest, Register temp) {
 MOZ_ASSERT(temp == InvalidReg);
 MOZ_ASSERT(input.low != dest.high && input.high != dest.low);

 int32_t amount = count.value & 0x3f;
 if (amount > 32) {
   rotateRight64(Imm32(64 - amount), input, dest, temp);
 } else {
   ScratchRegisterScope scratch(*this);
   if (amount == 0) {
     ma_mov(input.low, dest.low);
     ma_mov(input.high, dest.high);
   } else if (amount == 32) {
     // Swap the two words; scratch protects input.low when it aliases
     // dest.high's source.
     ma_mov(input.low, scratch);
     ma_mov(input.high, dest.low);
     ma_mov(scratch, dest.high);
   } else {
     MOZ_ASSERT(0 < amount && amount < 32);
     // Save the original high word, then shift bits across the word
     // boundary: high = high << n | low >> (32-n); low = low << n |
     // oldHigh >> (32-n).
     ma_mov(dest.high, scratch);
     as_mov(dest.high, lsl(dest.high, amount));
     as_orr(dest.high, dest.high, lsr(dest.low, 32 - amount));
     as_mov(dest.low, lsl(dest.low, amount));
     as_orr(dest.low, dest.low, lsr(scratch, 32 - amount));
   }
 }
}
    985 
// Rotate the 64-bit value left by a variable amount (masked to 0..63).
// Requires src == dest; |temp| preserves the original high word since both
// destination words are rewritten.
void MacroAssembler::rotateLeft64(Register shift, Register64 src,
                                 Register64 dest, Register temp) {
 MOZ_ASSERT(shift != temp);
 MOZ_ASSERT(src == dest);
 MOZ_ASSERT(temp != src.low && temp != src.high);
 MOZ_ASSERT(shift != src.low && shift != src.high);
 MOZ_ASSERT(temp != InvalidReg);

 ScratchRegisterScope shift_value(*this);
 Label high, done;

 // Keep the original high word; it is consumed last, after dest.high has
 // already been overwritten.
 ma_mov(src.high, temp);
 as_and(shift_value, shift, Imm8(0x3f));
 as_cmp(shift_value, Imm8(32));
 ma_b(&high, GreaterThanOrEqual);

 // high = high << shift | low >> 32 - shift
 // low = low << shift | high >> 32 - shift
 // The repeated rsb's toggle shift_value between n and 32 - n as each
 // instruction needs one or the other.
 as_mov(dest.high, lsl(src.high, shift_value));
 as_rsb(shift_value, shift_value, Imm8(32));
 as_orr(dest.high, dest.high, lsr(src.low, shift_value));

 as_rsb(shift_value, shift_value, Imm8(32));
 as_mov(dest.low, lsl(src.low, shift_value));
 as_rsb(shift_value, shift_value, Imm8(32));
 as_orr(dest.low, dest.low, lsr(temp, shift_value));

 ma_b(&done);

 // A 32 - 64 shift is a 0 - 32 shift in the other direction.
 bind(&high);
 as_rsb(shift_value, shift_value, Imm8(64));

 as_mov(dest.high, lsr(src.high, shift_value));
 as_rsb(shift_value, shift_value, Imm8(32));
 as_orr(dest.high, dest.high, lsl(src.low, shift_value));

 as_rsb(shift_value, shift_value, Imm8(32));
 as_mov(dest.low, lsr(src.low, shift_value));
 as_rsb(shift_value, shift_value, Imm8(32));
 as_orr(dest.low, dest.low, lsl(temp, shift_value));

 bind(&done);
}
   1030 
// Rotate |input| right by constant |count| into |dest|; zero count is a
// plain move.
void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
 if (count.value) {
   ma_ror(count, input, dest);
 } else {
   ma_mov(input, dest);
 }
}
   1038 
// Rotate |input| right by register |count| into |dest|. ARM has a native
// ror, so no scratch is needed (unlike rotateLeft).
void MacroAssembler::rotateRight(Register count, Register input,
                                Register dest) {
 ma_ror(count, input, dest);
}
   1043 
// Rotate the 64-bit value right by a constant amount (masked to 0..63).
// Rotations greater than 32 are handled as the complementary left rotate.
// NOTE(review): like rotateLeft64, the 0 < amount < 32 branch reads from
// |dest|, apparently assuming input == dest; confirm with callers.
void MacroAssembler::rotateRight64(Imm32 count, Register64 input,
                                  Register64 dest, Register temp) {
 MOZ_ASSERT(temp == InvalidReg);
 MOZ_ASSERT(input.low != dest.high && input.high != dest.low);

 int32_t amount = count.value & 0x3f;
 if (amount > 32) {
   rotateLeft64(Imm32(64 - amount), input, dest, temp);
 } else {
   ScratchRegisterScope scratch(*this);
   if (amount == 0) {
     ma_mov(input.low, dest.low);
     ma_mov(input.high, dest.high);
   } else if (amount == 32) {
     // Swap the two words via scratch.
     ma_mov(input.low, scratch);
     ma_mov(input.high, dest.low);
     ma_mov(scratch, dest.high);
   } else {
     MOZ_ASSERT(0 < amount && amount < 32);
     // Save the original high word, then shift bits across the word
     // boundary: high = high >> n | low << (32-n); low = low >> n |
     // oldHigh << (32-n).
     ma_mov(dest.high, scratch);
     as_mov(dest.high, lsr(dest.high, amount));
     as_orr(dest.high, dest.high, lsl(dest.low, 32 - amount));
     as_mov(dest.low, lsr(dest.low, amount));
     as_orr(dest.low, dest.low, lsl(scratch, 32 - amount));
   }
 }
}
   1071 
// Rotate the 64-bit value right by a variable amount (masked to 0..63).
// Requires src == dest; |temp| preserves the original high word since both
// destination words are rewritten. Mirror image of rotateLeft64(Register).
void MacroAssembler::rotateRight64(Register shift, Register64 src,
                                  Register64 dest, Register temp) {
 MOZ_ASSERT(shift != temp);
 MOZ_ASSERT(src == dest);
 MOZ_ASSERT(temp != src.low && temp != src.high);
 MOZ_ASSERT(shift != src.low && shift != src.high);
 MOZ_ASSERT(temp != InvalidReg);

 ScratchRegisterScope shift_value(*this);
 Label high, done;

 // Keep the original high word; it is consumed last, after dest.high has
 // already been overwritten.
 ma_mov(src.high, temp);
 as_and(shift_value, shift, Imm8(0x3f));
 as_cmp(shift_value, Imm8(32));
 ma_b(&high, GreaterThanOrEqual);

 // high = high >> shift | low << 32 - shift
 // low = low >> shift | high << 32 - shift
 // The repeated rsb's toggle shift_value between n and 32 - n as each
 // instruction needs one or the other.
 as_mov(dest.high, lsr(src.high, shift_value));
 as_rsb(shift_value, shift_value, Imm8(32));
 as_orr(dest.high, dest.high, lsl(src.low, shift_value));

 as_rsb(shift_value, shift_value, Imm8(32));
 as_mov(dest.low, lsr(src.low, shift_value));
 as_rsb(shift_value, shift_value, Imm8(32));
 as_orr(dest.low, dest.low, lsl(temp, shift_value));

 ma_b(&done);

 // A 32 - 64 shift is a 0 - 32 shift in the other direction.
 bind(&high);
 as_rsb(shift_value, shift_value, Imm8(64));

 as_mov(dest.high, lsl(src.high, shift_value));
 as_rsb(shift_value, shift_value, Imm8(32));
 as_orr(dest.high, dest.high, lsr(src.low, shift_value));

 as_rsb(shift_value, shift_value, Imm8(32));
 as_mov(dest.low, lsl(src.low, shift_value));
 as_rsb(shift_value, shift_value, Imm8(32));
 as_orr(dest.low, dest.low, lsr(temp, shift_value));

 bind(&done);
}
   1116 
   1117 // ===============================================================
   1118 // Condition functions
   1119 
// Load an 8-bit value from |lhs|, compare it against the 8-bit immediate
// |rhs|, and set |dest| to the boolean result of |cond|. The condition
// determines whether the load sign- or zero-extends: unsigned conditions
// compare zero-extended values, signed conditions compare sign-extended ones.
void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
                            Register dest) {
 ScratchRegisterScope scratch(*this);
 SecondScratchRegisterScope scratch2(*this);

 // Inlined calls to load8{Zero,Sign}Extend() and cmp32Set() to acquire
 // exclusive access to scratch registers.

 bool isSigned;
 Imm32 imm(0);
 switch (cond) {
   case Assembler::Equal:
   case Assembler::NotEqual:
   case Assembler::Above:
   case Assembler::AboveOrEqual:
   case Assembler::Below:
   case Assembler::BelowOrEqual:
     // Unsigned comparison: truncate the immediate to its uint8 value so it
     // matches the zero-extended load.
     isSigned = false;
     imm = Imm32(uint8_t(rhs.value));
     break;

   case Assembler::GreaterThan:
   case Assembler::GreaterThanOrEqual:
   case Assembler::LessThan:
   case Assembler::LessThanOrEqual:
     // Signed comparison: sign-extend the immediate to match the
     // sign-extended load.
     isSigned = true;
     imm = Imm32(int8_t(rhs.value));
     break;

   default:
     MOZ_CRASH("unexpected condition");
 }

 ma_dataTransferN(IsLoad, 8, isSigned, lhs.base, Imm32(lhs.offset), scratch,
                  scratch2);
 ma_cmp(scratch, imm, scratch2);
 emitSet(cond, dest);
}
   1158 
// Load a 16-bit value from |lhs|, compare it against the 16-bit immediate
// |rhs|, and set |dest| to the boolean result of |cond|. Mirrors cmp8Set:
// the condition's signedness selects sign- vs zero-extension for both the
// load and the immediate.
void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
                             Register dest) {
 ScratchRegisterScope scratch(*this);
 SecondScratchRegisterScope scratch2(*this);

 // Inlined calls to load16{Zero,Sign}Extend() and cmp32Set() to acquire
 // exclusive access to scratch registers.

 bool isSigned;
 Imm32 imm(0);
 switch (cond) {
   case Assembler::Equal:
   case Assembler::NotEqual:
   case Assembler::Above:
   case Assembler::AboveOrEqual:
   case Assembler::Below:
   case Assembler::BelowOrEqual:
     // Unsigned comparison: zero-extend both sides.
     isSigned = false;
     imm = Imm32(uint16_t(rhs.value));
     break;

   case Assembler::GreaterThan:
   case Assembler::GreaterThanOrEqual:
   case Assembler::LessThan:
   case Assembler::LessThanOrEqual:
     // Signed comparison: sign-extend both sides.
     isSigned = true;
     imm = Imm32(int16_t(rhs.value));
     break;

   default:
     MOZ_CRASH("unexpected condition");
 }

 ma_dataTransferN(IsLoad, 16, isSigned, lhs.base, Imm32(lhs.offset), scratch,
                  scratch2);
 ma_cmp(scratch, imm, scratch2);
 emitSet(cond, dest);
}
   1197 
// Compare two 32-bit operands and materialize the boolean result of |cond|
// into |dest| (0 or 1).
template <typename T1, typename T2>
void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
 cmp32(lhs, rhs);
 emitSet(cond, dest);
}
   1203 
// Set |dest| to the boolean result of a 64-bit register/register comparison.
// When |dest| aliases any input word, dispatch to the slower aliased variant
// that preserves the inputs while computing the result.
void MacroAssembler::cmp64Set(Condition cond, Register64 lhs, Register64 rhs,
                             Register dest) {
 if (lhs.high == dest || lhs.low == dest || rhs.high == dest ||
     rhs.low == dest) {
   cmp64SetAliased(cond, lhs, rhs, dest);
 } else {
   cmp64SetNonAliased(cond, lhs, rhs, dest);
 }
}
   1213 
// Set |dest| to the boolean result of a 64-bit register/immediate comparison.
// (Not-)equality against zero is special-cased: OR the two halves together
// and test the combined word for zero.
void MacroAssembler::cmp64Set(Condition cond, Register64 lhs, Imm64 rhs,
                             Register dest) {
 if (rhs.value == 0 &&
     (cond == Assembler::Equal || cond == Assembler::NotEqual)) {
   ma_orr(lhs.low, lhs.high, dest);
   cmp32Set(cond, dest, Imm32(0), dest);
 } else if (lhs.high == dest || lhs.low == dest) {
   // |dest| aliases an input word; use the input-preserving variant.
   cmp64SetAliased(cond, lhs, rhs, dest);
 } else {
   cmp64SetNonAliased(cond, lhs, rhs, dest);
 }
}
   1226 
// Set |dest| to the boolean result of comparing a 64-bit value in memory
// against a register pair; aliasing with the address base or either rhs
// word selects the input-preserving variant.
void MacroAssembler::cmp64Set(Condition cond, Address lhs, Register64 rhs,
                             Register dest) {
 if (lhs.base == dest || rhs.high == dest || rhs.low == dest) {
   cmp64SetAliased(cond, lhs, rhs, dest);
 } else {
   cmp64SetNonAliased(cond, lhs, rhs, dest);
 }
}
   1235 
// Set |dest| to the boolean result of comparing a 64-bit value in memory
// against an immediate; aliasing with the address base selects the
// input-preserving variant.
void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
                             Register dest) {
 if (lhs.base == dest) {
   cmp64SetAliased(cond, lhs, rhs, dest);
 } else {
   cmp64SetNonAliased(cond, lhs, rhs, dest);
 }
}
   1244 
// Compare two pointer-sized operands and materialize the boolean result of
// |cond| into |dest| (0 or 1).
template <typename T1, typename T2>
void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
 cmpPtr(lhs, rhs);
 emitSet(cond, dest);
}
   1250 
   1251 // ===============================================================
   1252 // Bit counting functions
   1253 
// Count leading zeros of |src| into |dest|. ARM's clz instruction defines
// clz(0) == 32, so |knownNotZero| is irrelevant here and ignored.
void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
 ma_clz(src, dest);
}
   1257 
// Count leading zeros of a 64-bit value into dest.low; dest.high is always
// zero. Uses predicated execution instead of branches: if clz(high) != 32
// (high word nonzero) that is the answer, otherwise the result is
// clz(low) + 32.
void MacroAssembler::clz64(Register64 src, Register64 dest) {
 ScratchRegisterScope scratch(*this);

 ma_clz(src.high, scratch);
 as_cmp(scratch, Imm8(32));
 ma_mov(scratch, dest.low, LeaveCC, NotEqual);
 // Executed only when the high word was zero (clz == 32).
 ma_clz(src.low, dest.low, Equal);
 as_add(dest.low, dest.low, Imm8(32), LeaveCC, Equal);
 ma_mov(Imm32(0), dest.high);
}
   1268 
// Count trailing zeros of |src| into |dest|; ma_ctz needs a scratch register.
void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
 ScratchRegisterScope scratch(*this);
 ma_ctz(src, dest, scratch);
}
   1273 
// Count trailing zeros of a 64-bit value into dest.low; dest.high is always
// zero. If the low word is nonzero its ctz is the answer; otherwise the
// answer is ctz(high) + 32.
void MacroAssembler::ctz64(Register64 src, Register64 dest) {
 Label done, high;
 as_cmp(src.low, Imm8(0));
 ma_b(&high, Equal);

 // Low word nonzero: its trailing zeros are the full answer.
 ctz32(src.low, dest.low, /* knownNotZero = */ true);
 ma_b(&done);

 bind(&high);
 // Low word is zero; high may still be zero (whole value 0), hence
 // knownNotZero = false.
 ctz32(src.high, dest.low, /* knownNotZero = */ false);
 as_add(dest.low, dest.low, Imm8(32));

 bind(&done);
 ma_mov(Imm32(0), dest.high);
}
   1289 
// Population count (number of set bits) of |input| into |output| using the
// classic bit-parallel reduction; |tmp| and the scratch register are
// clobbered. Works in place when input == output.
void MacroAssembler::popcnt32(Register input, Register output, Register tmp) {
 // Equivalent to GCC output of mozilla::CountPopulation32()

 ScratchRegisterScope scratch(*this);

 if (input != output) {
   ma_mov(input, output);
 }
 // output = output - ((output >> 1) & 0x55555555): 2-bit field sums.
 as_mov(tmp, asr(output, 1));
 ma_and(Imm32(0x55555555), tmp, scratch);
 ma_sub(output, tmp, output);
 // Sum adjacent 2-bit fields into 4-bit fields (mask both halves with
 // 0x33333333, then add).
 as_mov(tmp, asr(output, 2));
 ma_mov(Imm32(0x33333333), scratch);
 ma_and(scratch, output);
 ma_and(scratch, tmp);
 ma_add(output, tmp, output);
 // Sum adjacent 4-bit fields into bytes, masked with 0x0F0F0F0F.
 as_add(output, output, lsr(output, 4));
 ma_and(Imm32(0xF0F0F0F), output, scratch);
 // Accumulate all byte sums into the top byte, then shift it down.
 as_add(output, output, lsl(output, 8));
 as_add(output, output, lsl(output, 16));
 as_mov(output, asr(output, 24));
}
   1312 
// 64-bit population count: popcount each 32-bit half, then sum into
// dest.low with dest.high zeroed. The half-ordering is chosen so an
// overlapping src/dest pair never clobbers a half before it is counted.
void MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp) {
 MOZ_ASSERT(dest.low != tmp);
 MOZ_ASSERT(dest.high != tmp);
 MOZ_ASSERT(dest.low != dest.high);
 // The source and destination can overlap. Therefore make sure we don't
 // clobber the source before we have the data.
 if (dest.low != src.high) {
   popcnt32(src.low, dest.low, tmp);
   popcnt32(src.high, dest.high, tmp);
 } else {
   MOZ_ASSERT(dest.high != src.high);
   // dest.low would overwrite src.high; count into swapped halves instead.
   popcnt32(src.low, dest.high, tmp);
   popcnt32(src.high, dest.low, tmp);
 }
 ma_add(dest.high, dest.low);
 ma_mov(Imm32(0), dest.high);
}
   1330 
   1331 // ===============================================================
   1332 // Branch functions
   1333 
// Load an 8-bit value from |lhs|, compare it with 8-bit immediate |rhs|, and
// branch to |label| if |cond| holds. As in cmp8Set, the condition's
// signedness selects sign- vs zero-extension for both the load and the
// immediate.
void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
                            Label* label) {
 ScratchRegisterScope scratch(*this);
 SecondScratchRegisterScope scratch2(*this);

 // Inlined calls to load8{Zero,Sign}Extend() and branch32() to acquire
 // exclusive access to scratch registers.

 bool isSigned;
 Imm32 imm(0);
 switch (cond) {
   case Assembler::Equal:
   case Assembler::NotEqual:
   case Assembler::Above:
   case Assembler::AboveOrEqual:
   case Assembler::Below:
   case Assembler::BelowOrEqual:
     // Unsigned comparison: zero-extend both sides.
     isSigned = false;
     imm = Imm32(uint8_t(rhs.value));
     break;

   case Assembler::GreaterThan:
   case Assembler::GreaterThanOrEqual:
   case Assembler::LessThan:
   case Assembler::LessThanOrEqual:
     // Signed comparison: sign-extend both sides.
     isSigned = true;
     imm = Imm32(int8_t(rhs.value));
     break;

   default:
     MOZ_CRASH("unexpected condition");
 }

 ma_dataTransferN(IsLoad, 8, isSigned, lhs.base, Imm32(lhs.offset), scratch,
                  scratch2);
 ma_cmp(scratch, imm, scratch2);
 ma_b(label, cond);
}
   1372 
// Load an 8-bit value from a base+index address, compare it with register
// |rhs|, and branch on |cond|. The signed and unsigned paths differ because
// ARM's sign-extending byte load (ldrsb) uses the extended addressing form,
// which does not support a shifted index register.
void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
                            Label* label) {
 ScratchRegisterScope scratch(*this);
 SecondScratchRegisterScope scratch2(*this);

 // Inlined calls to load8{Zero,Sign}Extend() and branch32() to acquire
 // exclusive access to scratch registers.

 bool isSigned;
 switch (cond) {
   case Assembler::Equal:
   case Assembler::NotEqual:
   case Assembler::Above:
   case Assembler::AboveOrEqual:
   case Assembler::Below:
   case Assembler::BelowOrEqual:
     isSigned = false;
     break;

   case Assembler::GreaterThan:
   case Assembler::GreaterThanOrEqual:
   case Assembler::LessThan:
   case Assembler::LessThanOrEqual:
     isSigned = true;
     break;

   default:
     MOZ_CRASH("unexpected condition");
 }

 if (isSigned) {
   Register index = lhs.index;

   // ARMv7 does not have LSL on an index register with an extended load.
   if (lhs.scale != TimesOne) {
     ma_lsl(Imm32::ShiftOf(lhs.scale), index, scratch);
     index = scratch;
   }

   if (lhs.offset != 0) {
     // Fold the constant offset into the (materialized) index register.
     if (index != scratch) {
       ma_mov(index, scratch);
       index = scratch;
     }
     ma_add(Imm32(lhs.offset), index, scratch2);
   }
   ma_ldrsb(EDtrAddr(lhs.base, EDtrOffReg(index)), scratch);
 } else {
   Register base = lhs.base;
   uint32_t scale = Imm32::ShiftOf(lhs.scale).value;

   // Zero-extending ldrb supports a shifted index; only a nonzero offset
   // needs to be folded into the base first.
   if (lhs.offset == 0) {
     ma_ldrb(DTRAddr(base, DtrRegImmShift(lhs.index, LSL, scale)), scratch);
   } else {
     ma_add(base, Imm32(lhs.offset), scratch, scratch2);
     ma_ldrb(DTRAddr(scratch, DtrRegImmShift(lhs.index, LSL, scale)), scratch);
   }
 }

 ma_cmp(scratch, rhs);
 ma_b(label, cond);
}
   1435 
// Load a 16-bit value from |lhs|, compare it with 16-bit immediate |rhs|,
// and branch to |label| if |cond| holds. Mirrors branch8: the condition's
// signedness selects sign- vs zero-extension for both the load and the
// immediate.
void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
                             Label* label) {
 ScratchRegisterScope scratch(*this);
 SecondScratchRegisterScope scratch2(*this);

 // Inlined calls to load16{Zero,Sign}Extend() and branch32() to acquire
 // exclusive access to scratch registers.

 bool isSigned;
 Imm32 imm(0);
 switch (cond) {
   case Assembler::Equal:
   case Assembler::NotEqual:
   case Assembler::Above:
   case Assembler::AboveOrEqual:
   case Assembler::Below:
   case Assembler::BelowOrEqual:
     // Unsigned comparison: zero-extend both sides.
     isSigned = false;
     imm = Imm32(uint16_t(rhs.value));
     break;

   case Assembler::GreaterThan:
   case Assembler::GreaterThanOrEqual:
   case Assembler::LessThan:
   case Assembler::LessThanOrEqual:
     // Signed comparison: sign-extend both sides.
     isSigned = true;
     imm = Imm32(int16_t(rhs.value));
     break;

   default:
     MOZ_CRASH("unexpected condition");
 }

 ma_dataTransferN(IsLoad, 16, isSigned, lhs.base, Imm32(lhs.offset), scratch,
                  scratch2);
 ma_cmp(scratch, imm, scratch2);
 ma_b(label, cond);
}
   1474 
// Compare two 32-bit registers and branch to |label| if |cond| holds.
void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
                             Label* label) {
 ma_cmp(lhs, rhs);
 ma_b(label, cond);
}
   1480 
// Compare a 32-bit register against an immediate and branch on |cond|; the
// scratch register is needed when the immediate cannot be encoded directly.
void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 rhs,
                             Label* label) {
 ScratchRegisterScope scratch(*this);

 ma_cmp(lhs, rhs, scratch);
 ma_b(label, cond);
}
   1488 
// Load a 32-bit value from memory, compare it with |rhs|, and branch on
// |cond|.
void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
                             Label* label) {
 ScratchRegisterScope scratch(*this);
 SecondScratchRegisterScope scratch2(*this);

 ma_ldr(lhs, scratch, scratch2);
 ma_cmp(scratch, rhs);
 ma_b(label, cond);
}
   1498 
// Load a 32-bit value from memory, compare it with an immediate, and branch
// on |cond|. scratch2 doubles as the immediate-materialization scratch.
void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs,
                             Label* label) {
 ScratchRegisterScope scratch(*this);
 SecondScratchRegisterScope scratch2(*this);

 ma_ldr(lhs, scratch, scratch2);
 ma_cmp(scratch, rhs, scratch2);
 ma_b(label, cond);
}
   1508 
// Compare the 32-bit value at an absolute address against |rhs| and branch
// on |cond|. The address is materialized into scratch, then dereferenced.
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                             Register rhs, Label* label) {
 ScratchRegisterScope scratch(*this);

 // Load into scratch.
 movePtr(ImmWord(uintptr_t(lhs.addr)), scratch);
 ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);

 ma_cmp(scratch, rhs);
 ma_b(label, cond);
}
   1520 
// Compare the 32-bit value at an absolute address against an immediate and
// branch on |cond|.
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                             Imm32 rhs, Label* label) {
 ScratchRegisterScope scratch(*this);
 SecondScratchRegisterScope scratch2(*this);

 // Load into scratch.
 movePtr(ImmWord(uintptr_t(lhs.addr)), scratch);
 ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);

 ma_cmp(scratch, rhs, scratch2);
 ma_b(label, cond);
}
   1533 
// Load a 32-bit value from a base+index address into scratch2, then compare
// with the immediate and branch. The first scratch is released (inner scope
// ends) before delegating, since branch32(Register, Imm32) re-acquires it.
void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
                             Label* label) {
 SecondScratchRegisterScope scratch2(*this);
 {
   ScratchRegisterScope scratch(*this);

   Register base = lhs.base;
   uint32_t scale = Imm32::ShiftOf(lhs.scale).value;

   // Load lhs into scratch2.
   if (lhs.offset != 0) {
     // Fold the constant offset into the base first.
     ma_add(base, Imm32(lhs.offset), scratch, scratch2);
     ma_ldr(DTRAddr(scratch, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
   } else {
     ma_ldr(DTRAddr(base, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
   }
 }
 branch32(cond, scratch2, rhs, label);
}
   1553 
// Load a 32-bit value from a base+index address into scratch2, then compare
// with register |rhs| and branch. Same structure as the immediate variant.
void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs,
                             Register rhs, Label* label) {
 SecondScratchRegisterScope scratch2(*this);
 {
   ScratchRegisterScope scratch(*this);

   Register base = lhs.base;
   uint32_t scale = Imm32::ShiftOf(lhs.scale).value;

   // Load lhs into scratch2.
   if (lhs.offset != 0) {
     // Fold the constant offset into the base first.
     ma_add(base, Imm32(lhs.offset), scratch, scratch2);
     ma_ldr(DTRAddr(scratch, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
   } else {
     ma_ldr(DTRAddr(base, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
   }
 }
 branch32(cond, scratch2, rhs, label);
}
   1573 
// Compare the 32-bit value behind a wasm symbolic address against an
// immediate and branch on |cond|.
void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
                             Imm32 rhs, Label* label) {
 ScratchRegisterScope scratch(*this);
 SecondScratchRegisterScope scratch2(*this);

 // Materialize the symbolic address, then dereference it.
 movePtr(lhs, scratch);
 ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);

 ma_cmp(scratch, rhs, scratch2);
 ma_b(label, cond);
}
   1585 
// 64-bit register/immediate compare-and-branch; jumps to |success| when
// |cond| holds, and to |fail| otherwise (if provided). Delegates to the
// shared implementation.
void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
                             Label* success, Label* fail) {
 branch64Impl(cond, lhs, val, success, fail);
}
   1590 
// 64-bit register/register compare-and-branch; delegates to the shared
// implementation.
void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
                             Label* success, Label* fail) {
 branch64Impl(cond, lhs, rhs, success, fail);
}
   1595 
// 64-bit memory/immediate compare-and-branch; delegates to the shared
// implementation.
void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
                             Label* success, Label* fail) {
 branch64Impl(cond, lhs, val, success, fail);
}
   1600 
// 64-bit memory/register compare-and-branch; delegates to the shared
// implementation.
void MacroAssembler::branch64(Condition cond, const Address& lhs,
                             Register64 rhs, Label* success, Label* fail) {
 branch64Impl(cond, lhs, rhs, success, fail);
}
   1605 
// 64-bit memory/memory (in)equality branch, comparing word by word through
// |scratch|. Only Equal/NotEqual are supported. For Equal, a mismatched low
// word short-circuits to |done| (not equal, no branch); for NotEqual it
// branches to |label| immediately.
void MacroAssembler::branch64(Condition cond, const Address& lhs,
                             const Address& rhs, Register scratch,
                             Label* label) {
 MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
            "other condition codes not supported");
 MOZ_ASSERT(lhs.base != scratch);
 MOZ_ASSERT(rhs.base != scratch);

 Label done;

 load32(LowWord(rhs), scratch);
 if (cond == Assembler::Equal) {
   branch32(Assembler::NotEqual, LowWord(lhs), scratch, &done);
 } else {
   branch32(Assembler::NotEqual, LowWord(lhs), scratch, label);
 }

 // Low words agree (or we need both words checked): decide on the high word.
 load32(HighWord(rhs), scratch);
 branch32(cond, HighWord(lhs), scratch, label);

 bind(&done);
}
   1628 
// Pointer compare-and-branch; pointers are 32 bits on ARM32, so all
// branchPtr overloads reduce to branch32.
void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
                              Label* label) {
 branch32(cond, lhs, rhs, label);
}
   1633 
// Pointer/immediate compare-and-branch (32-bit on ARM32).
void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
                              Label* label) {
 branch32(cond, lhs, rhs, label);
}
   1638 
// Pointer/ImmPtr compare-and-branch: reinterpret the pointer constant as a
// word and delegate.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
                              Label* label) {
 branchPtr(cond, lhs, ImmWord(uintptr_t(rhs.value)), label);
}
   1643 
// Pointer/GC-pointer compare-and-branch. The GC pointer is materialized via
// movePtr so it can be traced/patched, then compared as a register.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
                              Label* label) {
 ScratchRegisterScope scratch(*this);
 movePtr(rhs, scratch);
 branchPtr(cond, lhs, scratch, label);
}
   1650 
// Pointer/word compare-and-branch (32-bit on ARM32).
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
                              Label* label) {
 branch32(cond, lhs, Imm32(rhs.value), label);
}
   1655 
// Memory-pointer/register compare-and-branch (32-bit on ARM32).
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
                              Label* label) {
 branch32(cond, lhs, rhs, label);
}
   1660 
// Memory-pointer/ImmPtr compare-and-branch: reinterpret the pointer
// constant as a word and delegate.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
                              Label* label) {
 branchPtr(cond, lhs, ImmWord(uintptr_t(rhs.value)), label);
}
   1665 
// Memory-pointer/GC-pointer compare-and-branch: load the pointer into
// scratch2, then compare against the GC constant.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
                              Label* label) {
 SecondScratchRegisterScope scratch2(*this);
 loadPtr(lhs, scratch2);
 branchPtr(cond, scratch2, rhs, label);
}
   1672 
// Memory-pointer/word compare-and-branch via scratch2.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
                              Label* label) {
 SecondScratchRegisterScope scratch2(*this);
 loadPtr(lhs, scratch2);
 branchPtr(cond, scratch2, rhs, label);
}
   1679 
// Absolute-address pointer/register compare-and-branch via scratch2.
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                              Register rhs, Label* label) {
 SecondScratchRegisterScope scratch2(*this);
 loadPtr(lhs, scratch2);
 branchPtr(cond, scratch2, rhs, label);
}
   1686 
// Absolute-address pointer/word compare-and-branch via scratch2.
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                              ImmWord rhs, Label* label) {
 SecondScratchRegisterScope scratch2(*this);
 loadPtr(lhs, scratch2);
 branchPtr(cond, scratch2, rhs, label);
}
   1693 
// Wasm symbolic-address pointer/register compare-and-branch via scratch2.
void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
                              Register rhs, Label* label) {
 SecondScratchRegisterScope scratch2(*this);
 loadPtr(lhs, scratch2);
 branchPtr(cond, scratch2, rhs, label);
}
   1700 
// Base+index pointer/word compare-and-branch (32-bit on ARM32).
void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
                              ImmWord rhs, Label* label) {
 branch32(cond, lhs, Imm32(rhs.value), label);
}
   1705 
// Base+index pointer/register compare-and-branch (32-bit on ARM32).
void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
                              Register rhs, Label* label) {
 branch32(cond, lhs, rhs, label);
}
   1710 
   1711 void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
   1712                                      Register rhs, Label* label) {
   1713  branchPtr(cond, lhs, rhs, label);
   1714 }
   1715 
   1716 void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
   1717                                 FloatRegister rhs, Label* label) {
   1718  compareFloat(lhs, rhs);
   1719 
   1720  if (cond == DoubleNotEqual) {
   1721    // Force the unordered cases not to jump.
   1722    Label unordered;
   1723    ma_b(&unordered, VFP_Unordered);
   1724    ma_b(label, VFP_NotEqualOrUnordered);
   1725    bind(&unordered);
   1726    return;
   1727  }
   1728 
   1729  if (cond == DoubleEqualOrUnordered) {
   1730    ma_b(label, VFP_Unordered);
   1731    ma_b(label, VFP_Equal);
   1732    return;
   1733  }
   1734 
   1735  ma_b(label, ConditionFromDoubleCondition(cond));
   1736 }
   1737 
// Truncation that may wrap modulo 2^32 is implemented identically to exact
// truncation here.
void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
                                                         Register dest,
                                                         Label* fail) {
  branchTruncateFloat32ToInt32(src, dest, fail);
}

// Truncate a float32 to an int32, branching to |fail| when the result may
// have been clamped. The conversion clamps out-of-range inputs to
// INT32_MIN/INT32_MAX, so either sentinel value signals possible overflow
// (and a genuine INT32_MIN/INT32_MAX result conservatively fails too).
void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
                                                  Register dest, Label* fail) {
  ScratchFloat32Scope scratchFloat32(*this);
  ScratchRegisterScope scratch(*this);

  ma_vcvt_F32_I32(src, scratchFloat32.sintOverlay());
  ma_vxfer(scratchFloat32, dest);
  // Compare against INT32_MAX; the second compare (against INT32_MIN) is
  // predicated on NotEqual so an Equal result from either compare survives
  // to the branch below.
  ma_cmp(dest, Imm32(0x7fffffff), scratch);
  ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
  ma_b(fail, Assembler::Equal);
}
   1755 
   1756 void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
   1757                                  FloatRegister rhs, Label* label) {
   1758  compareDouble(lhs, rhs);
   1759 
   1760  if (cond == DoubleNotEqual) {
   1761    // Force the unordered cases not to jump.
   1762    Label unordered;
   1763    ma_b(&unordered, VFP_Unordered);
   1764    ma_b(label, VFP_NotEqualOrUnordered);
   1765    bind(&unordered);
   1766    return;
   1767  }
   1768 
   1769  if (cond == DoubleEqualOrUnordered) {
   1770    ma_b(label, VFP_Unordered);
   1771    ma_b(label, VFP_Equal);
   1772    return;
   1773  }
   1774 
   1775  ma_b(label, ConditionFromDoubleCondition(cond));
   1776 }
   1777 
// Truncation that may wrap modulo 2^32 is implemented identically to exact
// truncation here.
void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
                                                        Register dest,
                                                        Label* fail) {
  branchTruncateDoubleToInt32(src, dest, fail);
}

// There are two options for implementing branchTruncateDoubleToInt32:
//
// 1. Convert the floating point value to an integer, if it did not fit, then it
// was clamped to INT_MIN/INT_MAX, and we can test it. NOTE: if the value
// really was supposed to be INT_MAX / INT_MIN then it will be wrong.
//
// 2. Convert the floating point value to an integer, if it did not fit, then it
// set one or two bits in the FPSCR. Check those.
//
// This implementation uses option 1 (see the sentinel compares below).
void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
                                                 Register dest, Label* fail) {
  ScratchDoubleScope scratchDouble(*this);
  FloatRegister scratchSIntReg = scratchDouble.sintOverlay();
  ScratchRegisterScope scratch(*this);

  ma_vcvt_F64_I32(src, scratchSIntReg);
  ma_vxfer(scratchSIntReg, dest);
  // Compare against INT32_MAX; the second compare (against INT32_MIN) is
  // predicated on NotEqual so an Equal result from either compare survives
  // to the branch below.
  ma_cmp(dest, Imm32(0x7fffffff), scratch);
  ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
  ma_b(fail, Assembler::Equal);
}
   1804 
// Branch to |label| if the signed 64-bit value does not fit in a (32-bit)
// pointer.
void MacroAssembler::branchInt64NotInPtrRange(Register64 src, Label* label) {
  // The high-word needs to be either all zero or all one, depending on the MSB
  // of the low-word — i.e. it must equal the low word's sign extension.
  as_cmp(src.high, asr(src.low, 31));
  ma_b(label, Assembler::NotEqual);
}

// Branch to |label| if the unsigned 64-bit value does not fit in a (32-bit)
// signed pointer.
void MacroAssembler::branchUInt64NotInPtrRange(Register64 src, Label* label) {
  ScratchRegisterScope scratch(*this);

  // The low-word MSB and all bits in the high-word must be zero. ORing the
  // high word with the low word's sign fill sets Z exactly when both hold.
  as_orr(scratch, src.high, asr(src.low, 31), SetCC);
  ma_b(label, Assembler::NonZero);
}
   1819 
// The branchOp32 family performs a 32-bit arithmetic operation and then
// branches on the resulting condition flags.

// Add |src| into |dest| and branch on the flags set by the addition.
template <typename T>
void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
                                 Label* label) {
  add32(src, dest);
  as_b(label, cond);
}

// Subtract |src| from |dest| and branch on the flags set by the subtraction.
template <typename T>
void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
                                 Label* label) {
  sub32(src, dest);
  j(cond, label);
}

// Multiply |dest| by |src| and branch on overflow (the only supported
// condition). ma_check_mul computes the product and returns the condition
// under which it overflowed.
template <typename T>
void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
                                 Label* label) {
  MOZ_ASSERT(cond == Assembler::Overflow);
  ScratchRegisterScope scratch(*this);
  Assembler::Condition overflow_cond =
      ma_check_mul(src, dest, dest, scratch, cond);
  j(overflow_cond, label);
}

// Shift |dest| right by |src| and branch on whether the result is (non-)zero.
template <typename T>
void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
                                    Label* label) {
  MOZ_ASSERT(cond == Zero || cond == NonZero);
  rshift32(src, dest);
  branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
}

// Negate |reg| and branch on overflow (the only supported condition;
// negating INT32_MIN overflows).
void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
  MOZ_ASSERT(cond == Overflow);
  neg32(reg);
  j(cond, label);
}
   1857 
// 64-bit add of an immediate followed by a branch on the final flags. The
// low words are added with SetCC so the carry feeds the high-word ADC.
void MacroAssembler::branchAdd64(Condition cond, Imm64 imm, Register64 dest,
                                 Label* label) {
  ScratchRegisterScope scratch(*this);
  ma_add(imm.low(), dest.low, scratch, SetCC);
  ma_adc(imm.hi(), dest.high, scratch, SetCC);
  j(cond, label);
}
   1865 
// Pointer-sized arithmetic-and-branch is 32-bit on ARM, so these forward to
// their 32-bit counterparts.

template <typename T>
void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
                                  Label* label) {
  branchAdd32(cond, src, dest, label);
}

template <typename T>
void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
                                  Label* label) {
  branchSub32(cond, src, dest, label);
}

void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
                                  Label* label) {
  branchMul32(cond, src, dest, label);
}

void MacroAssembler::branchNegPtr(Condition cond, Register reg, Label* label) {
  branchNeg32(cond, reg, label);
}

// Subtract |rhs| from |lhs| (updating |lhs|) and branch on the resulting
// flags; typically used to decrement a counter and loop on a condition.
void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
                                  Label* label) {
  ScratchRegisterScope scratch(*this);
  ma_sub(rhs, lhs, scratch, SetCC);
  as_b(label, cond);
}
   1893 
   1894 void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
   1895                                  Label* label) {
   1896  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
   1897             cond == NotSigned);
   1898  // x86 likes test foo, foo rather than cmp foo, #0.
   1899  // Convert the former into the latter.
   1900  if (lhs == rhs && (cond == Zero || cond == NonZero)) {
   1901    as_cmp(lhs, Imm8(0));
   1902  } else {
   1903    ma_tst(lhs, rhs);
   1904  }
   1905  ma_b(label, cond);
   1906 }
   1907 
// Bit-test |lhs| against an immediate mask and branch on the flags.
void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
                                  Label* label) {
  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
             cond == NotSigned);
  ScratchRegisterScope scratch(*this);
  ma_tst(lhs, rhs, scratch);
  ma_b(label, cond);
}

// Memory-operand forms: load the word into the second scratch register,
// then reuse the register/immediate form above.
void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
                                  Label* label) {
  SecondScratchRegisterScope scratch2(*this);
  load32(lhs, scratch2);
  branchTest32(cond, scratch2, rhs, label);
}

void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
                                  Imm32 rhs, Label* label) {
  SecondScratchRegisterScope scratch2(*this);
  load32(lhs, scratch2);
  branchTest32(cond, scratch2, rhs, label);
}

// Pointer-sized bit tests are 32-bit on ARM; forward to branchTest32.
void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
                                   Label* label) {
  branchTest32(cond, lhs, rhs, label);
}

void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
                                   Label* label) {
  branchTest32(cond, lhs, rhs, label);
}

void MacroAssembler::branchTestPtr(Condition cond, Register lhs, ImmWord rhs,
                                   Label* label) {
  branchTest32(cond, lhs, Imm32(rhs.value), label);
}

void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
                                   Imm32 rhs, Label* label) {
  branchTest32(cond, lhs, rhs, label);
}
   1950 
   1951 void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
   1952                                  Register64 rhs, Register temp, Label* success,
   1953                                  Label* fail) {
   1954  bool fallthrough = false;
   1955  Label fallthroughLabel;
   1956 
   1957  if (!fail) {
   1958    fail = &fallthroughLabel;
   1959    fallthrough = true;
   1960  }
   1961 
   1962  if (cond == Assembler::Zero || cond == Assembler::NonZero) {
   1963    if (lhs == rhs) {
   1964      ScratchRegisterScope scratch(*this);
   1965      ma_orr(lhs.low, lhs.high, scratch);
   1966      branchTest32(cond, scratch, scratch, success);
   1967    } else if (cond == Assembler::Zero) {
   1968      branchTest32(Assembler::NonZero, lhs.low, rhs.low, fail);
   1969      branchTest32(Assembler::Zero, lhs.high, rhs.high, success);
   1970    } else {
   1971      branchTest32(Assembler::NonZero, lhs.low, rhs.low, success);
   1972      branchTest32(Assembler::NonZero, lhs.high, rhs.high, success);
   1973    }
   1974  } else if (cond == Assembler::Signed || cond == Assembler::NotSigned) {
   1975    branchTest32(cond, lhs.high, rhs.high, success);
   1976  } else {
   1977    MOZ_CRASH("Unsupported condition");
   1978  }
   1979 
   1980  if (fallthrough) {
   1981    bind(fail);
   1982  } else {
   1983    jump(fail);
   1984  }
   1985 }
   1986 
// Bit-test the 64-bit pair against a 64-bit immediate, branching to
// |success| when |cond| holds and to |fail| otherwise (null |fail| means
// fall through on failure).
void MacroAssembler::branchTest64(Condition cond, Register64 lhs, Imm64 rhs,
                                  Label* success, Label* fail) {
  bool fallthrough = false;
  Label fallthroughLabel;

  if (!fail) {
    fail = &fallthroughLabel;
    fallthrough = true;
  }

  if (cond == Assembler::Zero || cond == Assembler::NonZero) {
    if (rhs.hi().value == 0) {
      // Mask touches only the low word; one 32-bit test decides.
      branchTest32(cond, lhs.low, rhs.low(), success);
    } else if (rhs.low().value == 0) {
      // Mask touches only the high word.
      branchTest32(cond, lhs.high, rhs.hi(), success);
    } else if (cond == Assembler::Zero) {
      // Both halves must test to zero.
      branchTest32(Assembler::NonZero, lhs.low, rhs.low(), fail);
      branchTest32(Assembler::Zero, lhs.high, rhs.hi(), success);
    } else {
      // Either half testing non-zero suffices.
      branchTest32(Assembler::NonZero, lhs.low, rhs.low(), success);
      branchTest32(Assembler::NonZero, lhs.high, rhs.hi(), success);
    }
  } else {
    MOZ_CRASH("Unsupported condition");
  }

  if (fallthrough) {
    bind(fail);
  } else {
    jump(fail);
  }
}
   2019 
// branchTestUndefined: branch on whether a value's type tag is (or is not)
// undefined. The overloads differ only in where the tag is read from; all
// funnel into the Impl template, which computes the machine condition via
// testUndefined and emits a single branch.

void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
                                         Label* label) {
  branchTestUndefinedImpl(cond, tag, label);
}

void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
                                         Label* label) {
  branchTestUndefinedImpl(cond, address, label);
}

void MacroAssembler::branchTestUndefined(Condition cond,
                                         const BaseIndex& address,
                                         Label* label) {
  branchTestUndefinedImpl(cond, address, label);
}

void MacroAssembler::branchTestUndefined(Condition cond,
                                         const ValueOperand& value,
                                         Label* label) {
  branchTestUndefinedImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestUndefinedImpl(Condition cond, const T& t,
                                             Label* label) {
  Condition c = testUndefined(cond, t);
  ma_b(label, c);
}
   2048 
// branchTestInt32: branch on whether a value's type tag is (or is not)
// Int32. All overloads funnel into the Impl template below.

void MacroAssembler::branchTestInt32(Condition cond, Register tag,
                                     Label* label) {
  branchTestInt32Impl(cond, tag, label);
}

void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
                                     Label* label) {
  branchTestInt32Impl(cond, address, label);
}

void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
                                     Label* label) {
  branchTestInt32Impl(cond, address, label);
}

void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
                                     Label* label) {
  branchTestInt32Impl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestInt32Impl(Condition cond, const T& t,
                                         Label* label) {
  Condition c = testInt32(cond, t);
  ma_b(label, c);
}

// Branch on the truthiness of an Int32-typed value (non-zero is truthy).
void MacroAssembler::branchTestInt32Truthy(bool truthy,
                                           const ValueOperand& value,
                                           Label* label) {
  Condition c = testInt32Truthy(truthy, value);
  ma_b(label, c);
}
   2082 
// branchTestDouble: branch on whether a value's type tag is (or is not)
// Double. All overloads funnel into the Impl template below.

void MacroAssembler::branchTestDouble(Condition cond, Register tag,
                                      Label* label) {
  branchTestDoubleImpl(cond, tag, label);
}

void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
                                      Label* label) {
  branchTestDoubleImpl(cond, address, label);
}

void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestDoubleImpl(cond, address, label);
}

void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestDoubleImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestDoubleImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testDouble(cond, t);
  ma_b(label, c);
}

// Branch on the truthiness of a double held in |reg|.
void MacroAssembler::branchTestDoubleTruthy(bool truthy, FloatRegister reg,
                                            Label* label) {
  Condition c = testDoubleTruthy(truthy, reg);
  ma_b(label, c);
}
   2115 
// branchTestNumber: branch on whether a value is (or is not) a number
// (Int32 or Double). Both overloads funnel into the Impl template below.

void MacroAssembler::branchTestNumber(Condition cond, Register tag,
                                      Label* label) {
  branchTestNumberImpl(cond, tag, label);
}

void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestNumberImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestNumberImpl(Condition cond, const T& t,
                                          Label* label) {
  cond = testNumber(cond, t);
  ma_b(label, cond);
}
   2132 
// branchTestBoolean: branch on whether a value's type tag is (or is not)
// Boolean. All overloads funnel into the Impl template below.

void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
                                       Label* label) {
  branchTestBooleanImpl(cond, tag, label);
}

void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
                                       Label* label) {
  branchTestBooleanImpl(cond, address, label);
}

void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
                                       Label* label) {
  branchTestBooleanImpl(cond, address, label);
}

void MacroAssembler::branchTestBoolean(Condition cond,
                                       const ValueOperand& value,
                                       Label* label) {
  branchTestBooleanImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestBooleanImpl(Condition cond, const T& t,
                                           Label* label) {
  Condition c = testBoolean(cond, t);
  ma_b(label, c);
}

// Branch on the truthiness of a Boolean-typed value.
void MacroAssembler::branchTestBooleanTruthy(bool truthy,
                                             const ValueOperand& value,
                                             Label* label) {
  Condition c = testBooleanTruthy(truthy, value);
  ma_b(label, c);
}
   2167 
// branchTestString: branch on whether a value's type tag is (or is not)
// String. All overloads funnel into the Impl template below.

void MacroAssembler::branchTestString(Condition cond, Register tag,
                                      Label* label) {
  branchTestStringImpl(cond, tag, label);
}

void MacroAssembler::branchTestString(Condition cond, const Address& address,
                                      Label* label) {
  branchTestStringImpl(cond, address, label);
}

void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestStringImpl(cond, address, label);
}

void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestStringImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestStringImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testString(cond, t);
  ma_b(label, c);
}

// Branch on the truthiness of a String-typed value.
void MacroAssembler::branchTestStringTruthy(bool truthy,
                                            const ValueOperand& value,
                                            Label* label) {
  Condition c = testStringTruthy(truthy, value);
  ma_b(label, c);
}
   2201 
// branchTestSymbol: branch on whether a value's type tag is (or is not)
// Symbol. All overloads funnel into the Impl template below.

void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
                                      Label* label) {
  branchTestSymbolImpl(cond, tag, label);
}

void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
                                      Label* label) {
  branchTestSymbolImpl(cond, address, label);
}

void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestSymbolImpl(cond, address, label);
}

void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestSymbolImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestSymbolImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testSymbol(cond, t);
  ma_b(label, c);
}
   2228 
// branchTestBigInt: branch on whether a value's type tag is (or is not)
// BigInt. All overloads funnel into the Impl template below.

void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
                                      Label* label) {
  branchTestBigIntImpl(cond, tag, label);
}

void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
                                      Label* label) {
  branchTestBigIntImpl(cond, address, label);
}

void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestBigIntImpl(cond, address, label);
}

void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestBigIntImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestBigIntImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testBigInt(cond, t);
  ma_b(label, c);
}

// Branch on the truthiness of a BigInt-typed value.
void MacroAssembler::branchTestBigIntTruthy(bool truthy,
                                            const ValueOperand& value,
                                            Label* label) {
  Condition c = testBigIntTruthy(truthy, value);
  ma_b(label, c);
}
   2262 
// branchTestNull: branch on whether a value's type tag is (or is not)
// Null. All overloads funnel into the Impl template below.

void MacroAssembler::branchTestNull(Condition cond, Register tag,
                                    Label* label) {
  branchTestNullImpl(cond, tag, label);
}

void MacroAssembler::branchTestNull(Condition cond, const Address& address,
                                    Label* label) {
  branchTestNullImpl(cond, address, label);
}

void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
                                    Label* label) {
  branchTestNullImpl(cond, address, label);
}

void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
                                    Label* label) {
  branchTestNullImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestNullImpl(Condition cond, const T& t,
                                        Label* label) {
  Condition c = testNull(cond, t);
  ma_b(label, c);
}
   2289 
// branchTestObject: branch on whether a value's type tag is (or is not)
// Object. All overloads funnel into the Impl template below.

void MacroAssembler::branchTestObject(Condition cond, Register tag,
                                      Label* label) {
  branchTestObjectImpl(cond, tag, label);
}

void MacroAssembler::branchTestObject(Condition cond, const Address& address,
                                      Label* label) {
  branchTestObjectImpl(cond, address, label);
}

void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
                                      Label* label) {
  branchTestObjectImpl(cond, address, label);
}

void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
                                      Label* label) {
  branchTestObjectImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestObjectImpl(Condition cond, const T& t,
                                          Label* label) {
  Condition c = testObject(cond, t);
  ma_b(label, c);
}
   2316 
// branchTestGCThing: branch on whether a value holds (or does not hold) a
// GC-managed thing. All overloads funnel into the Impl template below.

void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
                                       Label* label) {
  branchTestGCThingImpl(cond, address, label);
}

void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
                                       Label* label) {
  branchTestGCThingImpl(cond, address, label);
}

void MacroAssembler::branchTestGCThing(Condition cond,
                                       const ValueOperand& value,
                                       Label* label) {
  branchTestGCThingImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& t,
                                           Label* label) {
  Condition c = testGCThing(cond, t);
  ma_b(label, c);
}
   2339 
// branchTestPrimitive: branch on whether a value is (or is not) a
// primitive. Both overloads funnel into the Impl template below.

void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
                                         Label* label) {
  branchTestPrimitiveImpl(cond, tag, label);
}

void MacroAssembler::branchTestPrimitive(Condition cond,
                                         const ValueOperand& value,
                                         Label* label) {
  branchTestPrimitiveImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestPrimitiveImpl(Condition cond, const T& t,
                                             Label* label) {
  Condition c = testPrimitive(cond, t);
  ma_b(label, c);
}
   2357 
// branchTestMagic: branch on whether a value's type tag is (or is not)
// Magic. All overloads funnel into the Impl template below.

void MacroAssembler::branchTestMagic(Condition cond, Register tag,
                                     Label* label) {
  branchTestMagicImpl(cond, tag, label);
}

void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
                                     Label* label) {
  branchTestMagicImpl(cond, address, label);
}

void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
                                     Label* label) {
  branchTestMagicImpl(cond, address, label);
}

void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
                                     Label* label) {
  branchTestMagicImpl(cond, value, label);
}

template <typename T>
void MacroAssembler::branchTestMagicImpl(Condition cond, const T& t,
                                         Label* label) {
  cond = testMagic(cond, t);
  ma_b(label, cond);
}
   2384 
// Branch on whether the value at |valaddr| is the specific magic value
// |why|. Both the type tag and the payload are checked: Equal requires both
// to match, NotEqual is satisfied by a mismatch of either.
void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
                                     JSWhyMagic why, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  Label notMagic;
  // If the tag is not magic at all: Equal fails (skip the payload check),
  // NotEqual trivially succeeds (branch immediately).
  if (cond == Assembler::Equal) {
    branchTestMagic(Assembler::NotEqual, valaddr, &notMagic);
  } else {
    branchTestMagic(Assembler::NotEqual, valaddr, label);
  }

  // Tag is magic: decide on the payload, which holds the JSWhyMagic code.
  branch32(cond, ToPayload(valaddr), Imm32(why), label);
  bind(&notMagic);
}
   2399 
   2400 template <typename T>
   2401 void MacroAssembler::branchTestValue(Condition cond, const T& lhs,
   2402                                     const ValueOperand& rhs, Label* label) {
   2403  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
   2404 
   2405  Label notSameValue;
   2406  if (cond == Assembler::Equal) {
   2407    branch32(Assembler::NotEqual, ToType(lhs), rhs.typeReg(), &notSameValue);
   2408  } else {
   2409    branch32(Assembler::NotEqual, ToType(lhs), rhs.typeReg(), label);
   2410  }
   2411 
   2412  branch32(cond, ToPayload(lhs), rhs.payloadReg(), label);
   2413  bind(&notSameValue);
   2414 }
   2415 
// The test*Set family materializes a type-test result as a boolean in
// |dest| instead of branching: compute the machine condition with the
// corresponding test* helper, then emit a conditional set.

template <typename T>
void MacroAssembler::testNumberSet(Condition cond, const T& src,
                                   Register dest) {
  cond = testNumber(cond, src);
  emitSet(cond, dest);
}

template <typename T>
void MacroAssembler::testBooleanSet(Condition cond, const T& src,
                                    Register dest) {
  cond = testBoolean(cond, src);
  emitSet(cond, dest);
}

template <typename T>
void MacroAssembler::testStringSet(Condition cond, const T& src,
                                   Register dest) {
  cond = testString(cond, src);
  emitSet(cond, dest);
}

template <typename T>
void MacroAssembler::testSymbolSet(Condition cond, const T& src,
                                   Register dest) {
  cond = testSymbol(cond, src);
  emitSet(cond, dest);
}

template <typename T>
void MacroAssembler::testBigIntSet(Condition cond, const T& src,
                                   Register dest) {
  cond = testBigInt(cond, src);
  emitSet(cond, dest);
}
   2450 
// Indirect branch: load the target address from |addr| straight into pc.
void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
  MOZ_ASSERT(
      addr.offset == 0,
      "NYI: offsets from pc should be shifted by the number of instructions.");

  Register base = addr.base;
  uint32_t scale = Imm32::ShiftOf(addr.scale).value;

  // Loading into pc performs the jump.
  ma_ldr(DTRAddr(base, DtrRegImmShift(addr.index, LSL, scale)), pc);

  if (base == pc) {
    // When loading from pc, the pc is shifted to the next instruction, we
    // add one extra instruction to accommodate for this shifted offset.
    breakpoint();
  }
}
   2467 
   2468 void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Imm32 rhs,
   2469                                 Register src, Register dest) {
   2470  cmp32(lhs, rhs);
   2471  ma_mov(src, dest, LeaveCC, cond);
   2472 }
   2473 
   2474 void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
   2475                                 Register src, Register dest) {
   2476  cmp32(lhs, rhs);
   2477  ma_mov(src, dest, LeaveCC, cond);
   2478 }
   2479 
   2480 void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
   2481                                  Register src, Register dest) {
   2482  cmp32(lhs, rhs);
   2483  ma_mov(src, dest, LeaveCC, cond);
   2484 }
   2485 
// dest = src if (lhs cond *rhs), else dest is unchanged. The memory operand
// is first loaded into a scratch register, then the register-register
// overload does the compare and predicated move.
void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
                                 const Address& rhs, Register src,
                                 Register dest) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);
  // scratch2 may be needed by ma_ldr to form the effective address.
  ma_ldr(rhs, scratch, scratch2);
  cmp32Move32(cond, lhs, scratch, src, dest);
}
   2494 
// Pointers are 32 bits wide on ARM32, so delegate to the 32-bit form.
void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Imm32 rhs,
                                   Register src, Register dest) {
  cmp32MovePtr(cond, lhs, rhs, src, dest);
}
   2499 
// Pointers are 32 bits wide on ARM32, so delegate to the 32-bit form.
void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
                                   Register src, Register dest) {
  cmp32Move32(cond, lhs, rhs, src, dest);
}
   2504 
// Pointers are 32 bits wide on ARM32, so delegate to the 32-bit form.
void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
                                   const Address& rhs, Register src,
                                   Register dest) {
  cmp32Move32(cond, lhs, rhs, src, dest);
}
   2510 
void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
                                 const Address& rhs, const Address& src,
                                 Register dest) {
  // This is never used, but must be present to facilitate linking on arm.
  MOZ_CRASH("No known use cases");
}
   2517 
void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
                                 const Address& src, Register dest) {
  // This is never used, but must be present to facilitate linking on arm.
  MOZ_CRASH("No known use cases");
}
   2523 
// dest = *src if (lhs cond rhs), else dest is unchanged.
void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Imm32 rhs,
                                 const Address& src, Register dest) {
  cmp32(lhs, rhs);
  ScratchRegisterScope scratch(*this);
  // Predicated load: only executes when |cond| holds on the flags just set.
  ma_ldr(src, dest, scratch, Offset, cond);
}
   2530 
// dest = *src if (*lhs cond rhs), else dest is unchanged. Pointer-sized load
// is a 32-bit load on this target.
void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
                                  const Address& src, Register dest) {
  cmp32(lhs, rhs);
  ScratchRegisterScope scratch(*this);
  // Predicated load: only executes when |cond| holds on the flags just set.
  ma_ldr(src, dest, scratch, Offset, cond);
}
   2537 
// dest = *src if ((*addr & mask) cond 0), else dest is unchanged.
// Only Zero/NonZero make sense for a mask test.
void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
                                   Imm32 mask, const Address& src,
                                   Register dest) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(addr, mask);
  ScratchRegisterScope scratch(*this);
  // Predicated load: only executes when |cond| holds on the flags just set.
  ma_ldr(src, dest, scratch, Offset, cond);
}
   2546 
// dest = src if ((operand & mask) cond 0), else dest is unchanged.
void MacroAssembler::test32MovePtr(Condition cond, Register operand, Imm32 mask,
                                   Register src, Register dest) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(operand, mask);
  // Predicated mov: only executes when |cond| holds on the flags just set.
  ma_mov(src, dest, LeaveCC, cond);
}
   2553 
// dest = src if ((*addr & mask) cond 0), else dest is unchanged.
void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
                                   Imm32 mask, Register src, Register dest) {
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(addr, mask);
  // Predicated mov: only executes when |cond| holds on the flags just set.
  ma_mov(src, dest, LeaveCC, cond);
}
   2560 
// Conditional move used for Spectre mitigation; assumes the flags for |cond|
// were set by the caller's preceding compare.
void MacroAssembler::spectreMovePtr(Condition cond, Register src,
                                    Register dest) {
  ma_mov(src, dest, LeaveCC, cond);
}
   2565 
// Conditionally zero |dest| for Spectre mitigation; the middle Register
// parameter (a scratch on other targets) is unused on ARM. Assumes the flags
// for |cond| were set by the caller's preceding compare.
void MacroAssembler::spectreZeroRegister(Condition cond, Register,
                                         Register dest) {
  ma_mov(Imm32(0), dest, cond);
}
   2570 
// Branch to |failure| when |index| is not below |length| (unsigned compare,
// so negative indices also fail). On the in-bounds fall-through path,
// optionally mask |index| to 0 along the mispredicted path as a Spectre
// mitigation. |maybeScratch| is unused on ARM beyond the aliasing asserts.
void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(length != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  branch32(Assembler::BelowOrEqual, length, index, failure);

  if (JitOptions.spectreIndexMasking) {
    // The flags from the compare above survive the (not-taken) branch, so
    // this predicated mov zeroes |index| only when the branch condition
    // would have held (i.e. under misspeculation past the branch).
    ma_mov(Imm32(0), index, Assembler::BelowOrEqual);
  }
}
   2583 
// Same as the register-length overload, but the length is loaded from memory
// by branch32's Address overload.
void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  branch32(Assembler::BelowOrEqual, length, index, failure);

  if (JitOptions.spectreIndexMasking) {
    // Predicated on the flags from the compare above; zeroes |index| on the
    // misspeculated out-of-bounds path.
    ma_mov(Imm32(0), index, Assembler::BelowOrEqual);
  }
}
   2597 
// Pointers are 32 bits wide on ARM32, so delegate to the 32-bit check.
void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
                                           Register maybeScratch,
                                           Label* failure) {
  spectreBoundsCheck32(index, length, maybeScratch, failure);
}
   2603 
// Pointers are 32 bits wide on ARM32, so delegate to the 32-bit check.
void MacroAssembler::spectreBoundsCheckPtr(Register index,
                                           const Address& length,
                                           Register maybeScratch,
                                           Label* failure) {
  spectreBoundsCheck32(index, length, maybeScratch, failure);
}
   2610 
   2611 // ========================================================================
   2612 // Memory access primitives.
// Store a double to |addr| and return the buffer offset of the emitted store
// instruction (used to associate a potential memory fault with this access).
FaultingCodeOffset MacroAssembler::storeDouble(FloatRegister src,
                                               const Address& addr) {
  ScratchRegisterScope scratch(*this);
  BufferOffset offset = ma_vstr(src, addr, scratch);
  return FaultingCodeOffset(offset.getOffset());
}
// Store a double to a base+scaled-index address; returns the buffer offset
// of the emitted store instruction.
FaultingCodeOffset MacroAssembler::storeDouble(FloatRegister src,
                                               const BaseIndex& addr) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);
  uint32_t scale = Imm32::ShiftOf(addr.scale).value;
  BufferOffset offset = ma_vstr(src, addr.base, addr.index, scratch, scratch2,
                                scale, addr.offset);
  return FaultingCodeOffset(offset.getOffset());
}
   2628 
// Store a single-precision float to |addr|; |src| is narrowed to its single
// view via asSingle(). Returns the buffer offset of the emitted store.
FaultingCodeOffset MacroAssembler::storeFloat32(FloatRegister src,
                                                const Address& addr) {
  ScratchRegisterScope scratch(*this);
  BufferOffset offset = ma_vstr(src.asSingle(), addr, scratch);
  return FaultingCodeOffset(offset.getOffset());
}
// Store a single-precision float to a base+scaled-index address; returns the
// buffer offset of the emitted store instruction.
FaultingCodeOffset MacroAssembler::storeFloat32(FloatRegister src,
                                                const BaseIndex& addr) {
  ScratchRegisterScope scratch(*this);
  SecondScratchRegisterScope scratch2(*this);
  uint32_t scale = Imm32::ShiftOf(addr.scale).value;
  BufferOffset offset = ma_vstr(src.asSingle(), addr.base, addr.index, scratch,
                                scratch2, scale, addr.offset);
  return FaultingCodeOffset(offset.getOffset());
}
   2644 
// Store a float16: transfer the low half of |src| into a GPR, then emit a
// 16-bit store. Returns the faulting offset of the store itself.
FaultingCodeOffset MacroAssembler::storeFloat16(FloatRegister src,
                                                const Address& dest,
                                                Register scratch) {
  ma_vxfer(src, scratch);

  // store16 uses |strh|, which supports unaligned access.
  return store16(scratch, dest);
}
// BaseIndex variant of the float16 store; see the Address overload above for
// the transfer-then-strh strategy.
FaultingCodeOffset MacroAssembler::storeFloat16(FloatRegister src,
                                                const BaseIndex& dest,
                                                Register scratch) {
  ma_vxfer(src, scratch);

  // store16 uses |strh|, which supports unaligned access.
  return store16(scratch, dest);
}
   2661 
// Emit the weakest ARM barrier that satisfies |barrier|: DSB for "sync"
// barriers, DMB otherwise, with the store-store (ST) option when only
// store ordering is required. Emits nothing for a no-op barrier.
void MacroAssembler::memoryBarrier(MemoryBarrier barrier) {
  // On ARMv6 the optional argument (BarrierST, etc) is ignored.
  if (barrier.isSyncStoreStore()) {
    ma_dsb(BarrierST);
  } else if (barrier.hasSync()) {
    ma_dsb();
  } else if (barrier.isStoreStore()) {
    ma_dmb(BarrierST);
  } else if (!barrier.isNone()) {
    ma_dmb();
  }
}
   2674 
   2675 // ===============================================================
   2676 // Clamping functions.
   2677 
// Clamp a signed 32-bit value in |reg| to the range [0, 255] in place.
void MacroAssembler::clampIntToUint8(Register reg) {
  // Look at (reg >> 8) if it is 0, then reg shouldn't be clamped if it is
  // <0, then we want to clamp to 0, otherwise, we wish to clamp to 255
  ScratchRegisterScope scratch(*this);
  // Arithmetic shift, setting flags: Z if reg fits in a byte, N if negative.
  as_mov(scratch, asr(reg, 8), SetCC);
  // Out of range (non-zero high bits): saturate to 255 ...
  ma_mov(Imm32(0xff), reg, NotEqual);
  // ... then override with 0 if the value was negative. Order matters: a
  // negative value matches both predicates and must end up as 0.
  ma_mov(Imm32(0), reg, Signed);
}
   2686 
// Unbox the pointer payload of |src| into |dest| if its tag matches |type|;
// otherwise branch to |fail|. Only pointer-carrying JS value types are
// supported.
template <typename T>
void MacroAssemblerARMCompat::fallibleUnboxPtrImpl(const T& src, Register dest,
                                                   JSValueType type,
                                                   Label* fail) {
  // First branch away on a tag mismatch ...
  switch (type) {
    case JSVAL_TYPE_OBJECT:
      asMasm().branchTestObject(Assembler::NotEqual, src, fail);
      break;
    case JSVAL_TYPE_STRING:
      asMasm().branchTestString(Assembler::NotEqual, src, fail);
      break;
    case JSVAL_TYPE_SYMBOL:
      asMasm().branchTestSymbol(Assembler::NotEqual, src, fail);
      break;
    case JSVAL_TYPE_BIGINT:
      asMasm().branchTestBigInt(Assembler::NotEqual, src, fail);
      break;
    default:
      MOZ_CRASH("Unexpected type");
  }
  // ... then extract the payload, which is known to be of |type| here.
  unboxNonDouble(src, dest, type);
}
   2709 
// ValueOperand front-end for fallibleUnboxPtrImpl.
void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
                                      JSValueType type, Label* fail) {
  fallibleUnboxPtrImpl(src, dest, type, fail);
}
   2714 
// Address front-end for fallibleUnboxPtrImpl.
void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
                                      JSValueType type, Label* fail) {
  fallibleUnboxPtrImpl(src, dest, type, fail);
}
   2719 
// BaseIndex front-end for fallibleUnboxPtrImpl.
void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
                                      JSValueType type, Label* fail) {
  fallibleUnboxPtrImpl(src, dest, type, fail);
}
   2724 
   2725 //}}} check_macroassembler_style
   2726 // ===============================================================
   2727 
// Increment the 32-bit payload word of the Value stored at |addr|.
// ToPayload selects the payload half of the nunboxed Value.
void MacroAssemblerARMCompat::incrementInt32Value(const Address& addr) {
  asMasm().add32(Imm32(1), ToPayload(addr));
}
   2731 
   2732 template <typename T1, typename T2>
   2733 void MacroAssemblerARMCompat::cmp64SetAliased(Condition cond, T1 lhs, T2 rhs,
   2734                                              Register dest) {
   2735  auto& masm = asMasm();
   2736 
   2737  Label success, done;
   2738 
   2739  masm.branch64(cond, lhs, rhs, &success);
   2740  masm.move32(Imm32(0), dest);
   2741  masm.jump(&done);
   2742  masm.bind(&success);
   2743  masm.move32(Imm32(1), dest);
   2744  masm.bind(&done);
   2745 }
   2746 
   2747 template <typename T1, typename T2>
   2748 void MacroAssemblerARMCompat::cmp64SetNonAliased(Condition cond, T1 lhs, T2 rhs,
   2749                                                 Register dest) {
   2750  auto& masm = asMasm();
   2751 
   2752  Label done;
   2753 
   2754  masm.move32(Imm32(1), dest);
   2755  masm.branch64(cond, lhs, rhs, &done);
   2756  masm.move32(Imm32(0), dest);
   2757  masm.bind(&done);
   2758 }
   2759 
// Emit a 64-bit comparison branch built from two 32-bit compares. Jumps to
// |success| when (lhs cond rhs) holds and to |fail| otherwise; |fail| may be
// null, in which case the failure path falls through.
template <typename T1, typename T2>
void MacroAssemblerARMCompat::branch64Impl(Condition cond, T1 lhs, T2 rhs,
                                           Label* success, Label* fail) {
  auto& masm = asMasm();

  // Split a 64-bit operand (Imm64, Register64, or an addressable operand)
  // into its {high, low} 32-bit halves.
  auto words = [](auto operand) {
    using Operand = decltype(operand);
    if constexpr (std::is_same_v<Operand, Imm64>) {
      return std::pair{operand.hi(), operand.low()};
    } else if constexpr (std::is_same_v<Operand, Register64>) {
      return std::pair{operand.high, operand.low};
    } else {
      return std::pair{HighWord(operand), LowWord(operand)};
    }
  };

  auto [lhsHigh, lhsLow] = words(lhs);
  auto [rhsHigh, rhsLow] = words(rhs);

  // With no explicit |fail| target, failure falls through: bind a local
  // label just past the emitted code instead of emitting a final jump.
  bool fallthrough = false;
  Label fallthroughLabel;

  if (!fail) {
    fail = &fallthroughLabel;
    fallthrough = true;
  }

  switch (cond) {
    case Assembler::Equal:
      // Equal iff both halves match.
      masm.branch32(Assembler::NotEqual, lhsLow, rhsLow, fail);
      masm.branch32(Assembler::Equal, lhsHigh, rhsHigh, success);
      break;
    case Assembler::NotEqual:
      // Not-equal iff either half differs; equality falls through to |fail|.
      masm.branch32(Assembler::NotEqual, lhsLow, rhsLow, success);
      masm.branch32(Assembler::NotEqual, lhsHigh, rhsHigh, success);
      break;
    case Assembler::LessThan:
    case Assembler::LessThanOrEqual:
    case Assembler::GreaterThan:
    case Assembler::GreaterThanOrEqual:
    case Assembler::Below:
    case Assembler::BelowOrEqual:
    case Assembler::Above:
    case Assembler::AboveOrEqual: {
      // Relational compare: the high words decide unless they are equal.
      // cond1: strict form of |cond| (decides on the high words alone).
      // cond2: strict form of the inverted condition (definite failure).
      // cond3: unsigned form of |cond| for the low words — low words are
      //        always compared as unsigned, regardless of |cond|'s sign.
      Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
      Assembler::Condition cond2 =
          Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
      Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);

      cmp32(lhsHigh, rhsHigh);
      ma_b(success, cond1);
      ma_b(fail, cond2);
      // High words are equal here; the low words decide.
      cmp32(lhsLow, rhsLow);
      ma_b(success, cond3);
      break;
    }
    default:
      MOZ_CRASH("Condition code not supported");
      break;
  }

  // Failure path: either fall through (local label) or jump to caller's
  // |fail| target.
  if (fallthrough) {
    bind(fail);
  } else {
    jump(fail);
  }
}
   2827 
   2828 }  // namespace jit
   2829 }  // namespace js
   2830 
   2831 #endif /* jit_arm_MacroAssembler_arm_inl_h */