tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

MacroAssembler-x64-inl.h (39419B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef jit_x64_MacroAssembler_x64_inl_h
      8 #define jit_x64_MacroAssembler_x64_inl_h
      9 
     10 #include "jit/x64/MacroAssembler-x64.h"
     11 
     12 #include "jit/x86-shared/MacroAssembler-x86-shared-inl.h"
     13 
     14 namespace js {
     15 namespace jit {
     16 
     17 //{{{ check_macroassembler_style
     18 // ===============================================================
     19 
void MacroAssembler::move64(Imm64 imm, Register64 dest) {
  // Use mov instead of movq because it has special optimizations for imm == 0.
  mov(ImmWord(imm.value), dest.reg);
}

void MacroAssembler::move64(Register64 src, Register64 dest) {
  // Plain 64-bit register-to-register move.
  movq(src.reg, dest.reg);
}

void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
  // Bitwise copy of the double's 64 bits into a GPR (no conversion).
  vmovq(src, dest.reg);
}

void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
  // Bitwise copy of a GPR's 64 bits into an XMM register (no conversion).
  vmovq(src.reg, dest);
}

void MacroAssembler::move64To32(Register64 src, Register dest) {
  // A 32-bit mov implicitly zeroes the upper 32 bits of |dest| on x64.
  movl(src.reg, dest);
}

void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
  // A 32-bit mov implicitly zero-extends into the full 64-bit register.
  movl(src, dest.reg);
}

void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
  // Sign-extend the low byte of |src| to 64 bits.
  movsbq(Operand(src), dest.reg);
}

void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
  // Sign-extend the low 16 bits of |src| to 64 bits.
  movswq(Operand(src), dest.reg);
}

void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
  // Sign-extend the low 32 bits of |src| to 64 bits.
  movslq(src, dest.reg);
}

void MacroAssembler::move8SignExtendToPtr(Register src, Register dest) {
  // Pointer-width (64-bit) sign extension of the low byte.
  movsbq(Operand(src), dest);
}

void MacroAssembler::move16SignExtendToPtr(Register src, Register dest) {
  // Pointer-width (64-bit) sign extension of the low 16 bits.
  movswq(Operand(src), dest);
}

void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
  // Pointer-width (64-bit) sign extension of the low 32 bits.
  movslq(src, dest);
}

void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
  // A 32-bit mov zero-extends to the full pointer-width register.
  movl(src, dest);
}
     72 
     73 // ===============================================================
     74 // Load instructions
     75 
void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
  // Load 32 bits from memory and sign-extend to pointer width.
  movslq(Operand(src), dest);
}
     79 
     80 // ===============================================================
     81 // Logical instructions
     82 
// One's-complement of the full 64-bit (pointer-width) register.
void MacroAssembler::notPtr(Register reg) { notq(reg); }

// dest &= src over the full 64 bits.
void MacroAssembler::andPtr(Register src, Register dest) { andq(src, dest); }
     86 
     87 void MacroAssembler::andPtr(Imm32 imm, Register dest) {
     88  if (imm.value >= 0) {
     89    andl(imm, dest);
     90  } else {
     91    andq(imm, dest);
     92  }
     93 }
     94 
void MacroAssembler::andPtr(Imm32 imm, Register src, Register dest) {
  // Copy src into dest first (skipped when they alias), then mask in place.
  if (src != dest) {
    movq(src, dest);
  }
  andPtr(imm, dest);
}
    101 
void MacroAssembler::and64(Imm64 imm, Register64 dest) {
  // Immediates that fit a sign-extended 32-bit field encode directly;
  // wider values are materialized in the scratch register first.
  if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
    if (int32_t(imm.value) >= 0) {
      // Non-negative mask: the 32-bit form zero-extends its result, which
      // matches andq with a zero high word and has a shorter encoding.
      andl(Imm32(imm.value), dest.reg);
    } else {
      andq(Imm32(imm.value), dest.reg);
    }
  } else {
    ScratchRegisterScope scratch(*this);
    movq(ImmWord(uintptr_t(imm.value)), scratch);
    andq(scratch, dest.reg);
  }
}

void MacroAssembler::or64(Imm64 imm, Register64 dest) {
  // orq sign-extends a 32-bit immediate, so any int32-range value can be
  // encoded inline; larger values go through the scratch register.
  if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
    orq(Imm32(imm.value), dest.reg);
  } else {
    ScratchRegisterScope scratch(*this);
    movq(ImmWord(uintptr_t(imm.value)), scratch);
    orq(scratch, dest.reg);
  }
}

void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
  // Same encoding strategy as or64: inline int32-range immediates,
  // scratch-register materialization for anything wider.
  if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
    xorq(Imm32(imm.value), dest.reg);
  } else {
    ScratchRegisterScope scratch(*this);
    movq(ImmWord(uintptr_t(imm.value)), scratch);
    xorq(scratch, dest.reg);
  }
}
    135 
// dest |= src over the full 64 bits.
void MacroAssembler::orPtr(Register src, Register dest) { orq(src, dest); }

// dest |= imm (immediate is sign-extended to 64 bits).
void MacroAssembler::orPtr(Imm32 imm, Register dest) { orq(imm, dest); }

void MacroAssembler::orPtr(Imm32 imm, Register src, Register dest) {
  // Copy src into dest (skipped when they alias), then OR in place.
  if (src != dest) {
    movq(src, dest);
  }
  orq(imm, dest);
}

void MacroAssembler::and64(Register64 src, Register64 dest) {
  andq(src.reg, dest.reg);
}

void MacroAssembler::or64(Register64 src, Register64 dest) {
  orq(src.reg, dest.reg);
}

void MacroAssembler::xor64(Register64 src, Register64 dest) {
  xorq(src.reg, dest.reg);
}

// dest ^= src over the full 64 bits.
void MacroAssembler::xorPtr(Register src, Register dest) { xorq(src, dest); }

// dest ^= imm (immediate is sign-extended to 64 bits).
void MacroAssembler::xorPtr(Imm32 imm, Register dest) { xorq(imm, dest); }

void MacroAssembler::xorPtr(Imm32 imm, Register src, Register dest) {
  // Copy src into dest (skipped when they alias), then XOR in place.
  if (src != dest) {
    movq(src, dest);
  }
  xorq(imm, dest);
}

// Memory-operand variants of the 64-bit logical operations.
void MacroAssembler::and64(const Operand& src, Register64 dest) {
  andq(src, dest.reg);
}

void MacroAssembler::or64(const Operand& src, Register64 dest) {
  orq(src, dest.reg);
}

void MacroAssembler::xor64(const Operand& src, Register64 dest) {
  xorq(src, dest.reg);
}
    181 
    182 // ===============================================================
    183 // Swap instructions
    184 
    185 void MacroAssembler::byteSwap64(Register64 reg) { bswapq(reg.reg); }
    186 
    187 // ===============================================================
    188 // Arithmetic functions
    189 
void MacroAssembler::addPtr(Register src, Register dest) { addq(src, dest); }

void MacroAssembler::addPtr(Imm32 imm, Register dest) { addq(imm, dest); }

void MacroAssembler::addPtr(ImmWord imm, Register dest) {
  ScratchRegisterScope scratch(*this);
  MOZ_ASSERT(dest != scratch);
  // Immediates that fit a sign-extended 32-bit field encode directly;
  // otherwise materialize the constant in the scratch register first.
  if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
    addq(Imm32((int32_t)imm.value), dest);
  } else {
    mov(imm, scratch);
    addq(scratch, dest);
  }
}

void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
  // Read-modify-write add directly on memory.
  addq(imm, Operand(dest));
}

void MacroAssembler::addPtr(Imm32 imm, const AbsoluteAddress& dest) {
  addq(imm, Operand(dest));
}

void MacroAssembler::addPtr(const Address& src, Register dest) {
  addq(Operand(src), dest);
}

void MacroAssembler::add64(const Operand& src, Register64 dest) {
  addq(src, dest.reg);
}

void MacroAssembler::add64(Register64 src, Register64 dest) {
  addq(src.reg, dest.reg);
}

void MacroAssembler::add64(Imm32 imm, Register64 dest) { addq(imm, dest.reg); }

void MacroAssembler::add64(Imm64 imm, Register64 dest) {
  // Delegate to addPtr, which picks the best encoding for the immediate.
  addPtr(ImmWord(imm.value), dest.reg);
}
    230 
CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
  // Emit "dest = StackPointer + <imm32>" with a placeholder immediate that
  // is filled in later by patchSub32FromStackPtr.
  moveStackPtrTo(dest);
  addqWithPatch(Imm32(0), dest);
  return CodeOffset(currentOffset());
}

void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
  // The patched instruction is an add, so subtraction is patched as -imm.
  patchAddq(offset, -imm.value);
}
    240 
void MacroAssembler::subPtr(Register src, Register dest) { subq(src, dest); }

void MacroAssembler::subPtr(Register src, const Address& dest) {
  // Read-modify-write subtract directly on memory.
  subq(src, Operand(dest));
}

void MacroAssembler::subPtr(Imm32 imm, Register dest) { subq(imm, dest); }
    248 
    249 void MacroAssembler::subPtr(ImmWord imm, Register dest) {
    250  ScratchRegisterScope scratch(*this);
    251  MOZ_ASSERT(dest != scratch);
    252  if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
    253    subq(Imm32((int32_t)imm.value), dest);
    254  } else {
    255    mov(imm, scratch);
    256    subq(scratch, dest);
    257  }
    258 }
    259 
void MacroAssembler::subPtr(const Address& addr, Register dest) {
  subq(Operand(addr), dest);
}

void MacroAssembler::sub64(const Operand& src, Register64 dest) {
  subq(src, dest.reg);
}

void MacroAssembler::sub64(Register64 src, Register64 dest) {
  subq(src.reg, dest.reg);
}

void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
  // Delegate to subPtr, which picks the best encoding for the immediate.
  subPtr(ImmWord(imm.value), dest.reg);
}
    275 
// Compute the high 32 bits of the unsigned 32x32->64 product |imm * src|
// into |dest|. Note: |src| may be clobbered (its high word is cleared).
void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
  // To compute the unsigned multiplication using imulq, we have to ensure both
  // operands don't have any bits set in the high word.

  if (imm.value >= 0) {
    // Clear the high word of |src|.
    movl(src, src);

    // |imm| and |src| are both positive, so directly perform imulq.
    imulq(imm, src, dest);
  } else {
    // Store the low word of |src| into |dest|.
    movl(src, dest);

    // Compute the unsigned value of |imm| before performing imulq.
    movl(imm, ScratchReg);
    imulq(ScratchReg, dest);
  }

  // Move the high word into |dest|.
  shrq(Imm32(32), dest);
}
    298 
void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
  // srcDest *= rhs (low 64 bits of the product).
  imulq(rhs, srcDest);
}

void MacroAssembler::mulPtr(ImmWord rhs, Register srcDest) {
  mul64(Imm64(rhs.value), Register64(srcDest));
}

void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
                           const Register temp) {
  // No temp register is needed on x64; the parameter exists for API parity
  // with 32-bit platforms.
  MOZ_ASSERT(temp == InvalidReg);
  mul64(imm, dest);
}

void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
  // int32-range immediates use the imul reg, reg, imm form; wider values
  // are materialized in the scratch register first.
  if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
    imulq(Imm32((int32_t)imm.value), dest.reg, dest.reg);
  } else {
    movq(ImmWord(uintptr_t(imm.value)), ScratchReg);
    imulq(ScratchReg, dest.reg);
  }
}

void MacroAssembler::mul64(const Register64& src, const Register64& dest,
                           const Register temp) {
  MOZ_ASSERT(temp == InvalidReg);
  mul64(Operand(src.reg), dest);
}

void MacroAssembler::mul64(const Operand& src, const Register64& dest) {
  imulq(src, dest.reg);
}

void MacroAssembler::mul64(const Operand& src, const Register64& dest,
                           const Register temp) {
  MOZ_ASSERT(temp == InvalidReg);
  mul64(src, dest);
}

void MacroAssembler::mulBy3(Register src, Register dest) {
  // dest = src + src*2, computed with a single lea.
  lea(Operand(src, src, TimesTwo), dest);
}

void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
                                  FloatRegister dest) {
  // dest *= *imm. |temp| is unused here; ScratchReg holds the address.
  movq(imm, ScratchReg);
  vmulsd(Operand(ScratchReg, 0), dest, dest);
}
    347 
void MacroAssembler::inc64(AbsoluteAddress dest) {
  // Addresses that fit a 32-bit displacement are used directly; otherwise
  // the pointer is loaded into the scratch register first.
  if (X86Encoding::IsAddressImmediate(dest.addr)) {
    addPtr(Imm32(1), dest);
  } else {
    ScratchRegisterScope scratch(*this);
    mov(ImmPtr(dest.addr), scratch);
    addPtr(Imm32(1), Address(scratch, 0));
  }
}

// Two's-complement negation of a 64-bit register.
void MacroAssembler::neg64(Register64 reg) { negq(reg.reg); }

void MacroAssembler::negPtr(Register reg) { negq(reg); }
    361 
    362 // ===============================================================
    363 // Shift functions
    364 
void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  shlq(imm, dest);
}

void MacroAssembler::lshiftPtr(Imm32 imm, Register src, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  // Copy src into dest (skipped when they alias), then shift in place.
  if (src != dest) {
    movq(src, dest);
  }
  shlq(imm, dest);
}

void MacroAssembler::lshiftPtr(Register shift, Register srcDest) {
  // BMI2's shlx accepts the count in any register; legacy shl needs cl.
  if (Assembler::HasBMI2()) {
    shlxq(srcDest, shift, srcDest);
    return;
  }
  MOZ_ASSERT(shift == rcx);
  shlq_cl(srcDest);
}

void MacroAssembler::flexibleLshiftPtr(Register shift, Register srcDest) {
  if (HasBMI2()) {
    shlxq(srcDest, shift, srcDest);
    return;
  }
  if (shift == rcx) {
    shlq_cl(srcDest);
  } else {
    // Shift amount must be in rcx. Swap it in, shift the value (accounting
    // for srcDest possibly having been moved by the swap), then swap back.
    xchg(shift, rcx);
    shlq_cl(shift == srcDest ? rcx : srcDest == rcx ? shift : srcDest);
    xchg(shift, rcx);
  }
}

void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  lshiftPtr(imm, dest.reg);
}

void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
  if (Assembler::HasBMI2()) {
    shlxq(srcDest.reg, shift, srcDest.reg);
    return;
  }
  MOZ_ASSERT(shift == rcx);
  shlq_cl(srcDest.reg);
}
    415 
void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  shrq(imm, dest);
}

void MacroAssembler::rshiftPtr(Imm32 imm, Register src, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  // Copy src into dest (skipped when they alias), then shift in place.
  if (src != dest) {
    movq(src, dest);
  }
  shrq(imm, dest);
}

void MacroAssembler::rshiftPtr(Register shift, Register srcDest) {
  // BMI2's shrx accepts the count in any register; legacy shr needs cl.
  if (Assembler::HasBMI2()) {
    shrxq(srcDest, shift, srcDest);
    return;
  }
  MOZ_ASSERT(shift == rcx);
  shrq_cl(srcDest);
}

void MacroAssembler::flexibleRshiftPtr(Register shift, Register srcDest) {
  if (HasBMI2()) {
    shrxq(srcDest, shift, srcDest);
    return;
  }
  if (shift == rcx) {
    shrq_cl(srcDest);
  } else {
    // Shift amount must be in rcx. Swap it in, shift the value (accounting
    // for srcDest possibly having been moved by the swap), then swap back.
    xchg(shift, rcx);
    shrq_cl(shift == srcDest ? rcx : srcDest == rcx ? shift : srcDest);
    xchg(shift, rcx);
  }
}

void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
  rshiftPtr(imm, dest.reg);
}

void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
  if (Assembler::HasBMI2()) {
    shrxq(srcDest.reg, shift, srcDest.reg);
    return;
  }
  MOZ_ASSERT(shift == rcx);
  shrq_cl(srcDest.reg);
}
    465 
void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  sarq(imm, dest);
}

void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register src,
                                         Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  // Copy src into dest (skipped when they alias), then shift in place.
  if (src != dest) {
    movq(src, dest);
  }
  sarq(imm, dest);
}

void MacroAssembler::rshiftPtrArithmetic(Register shift, Register srcDest) {
  // BMI2's sarx accepts the count in any register; legacy sar needs cl.
  if (Assembler::HasBMI2()) {
    sarxq(srcDest, shift, srcDest);
    return;
  }
  MOZ_ASSERT(shift == rcx);
  sarq_cl(srcDest);
}

void MacroAssembler::flexibleRshiftPtrArithmetic(Register shift,
                                                 Register srcDest) {
  if (HasBMI2()) {
    sarxq(srcDest, shift, srcDest);
    return;
  }
  if (shift == rcx) {
    sarq_cl(srcDest);
  } else {
    // Shift amount must be in rcx. Swap it in, shift the value (accounting
    // for srcDest possibly having been moved by the swap), then swap back.
    xchg(shift, rcx);
    sarq_cl(shift == srcDest ? rcx : srcDest == rcx ? shift : srcDest);
    xchg(shift, rcx);
  }
}

void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  rshiftPtrArithmetic(imm, dest.reg);
}

void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
  if (Assembler::HasBMI2()) {
    sarxq(srcDest.reg, shift, srcDest.reg);
    return;
  }
  MOZ_ASSERT(shift == rcx);
  sarq_cl(srcDest.reg);
}
    518 
    519 // ===============================================================
    520 // Rotation functions
    521 
void MacroAssembler::rotateLeft64(Register count, Register64 src,
                                  Register64 dest) {
  MOZ_ASSERT(src == dest, "defineReuseInput");
  MOZ_ASSERT(count == ecx, "defineFixed(ecx)");

  // A variable-count rotate requires the count in cl.
  rolq_cl(dest.reg);
}

void MacroAssembler::rotateLeft64(Register count, Register64 src,
                                  Register64 dest, Register temp) {
  // No temp register is needed on x64; the parameter exists for API parity
  // with 32-bit platforms.
  MOZ_ASSERT(temp == InvalidReg);
  rotateLeft64(count, src, dest);
}

void MacroAssembler::rotateRight64(Register count, Register64 src,
                                   Register64 dest) {
  MOZ_ASSERT(src == dest, "defineReuseInput");
  MOZ_ASSERT(count == ecx, "defineFixed(ecx)");

  rorq_cl(dest.reg);
}

void MacroAssembler::rotateRight64(Register count, Register64 src,
                                   Register64 dest, Register temp) {
  MOZ_ASSERT(temp == InvalidReg);
  rotateRight64(count, src, dest);
}

void MacroAssembler::rotateLeft64(Imm32 count, Register64 src,
                                  Register64 dest) {
  MOZ_ASSERT(src == dest, "defineReuseInput");
  rolq(count, dest.reg);
}

void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
                                  Register temp) {
  MOZ_ASSERT(temp == InvalidReg);
  rotateLeft64(count, src, dest);
}

void MacroAssembler::rotateRight64(Imm32 count, Register64 src,
                                   Register64 dest) {
  MOZ_ASSERT(src == dest, "defineReuseInput");
  rorq(count, dest.reg);
}

void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
                                   Register temp) {
  MOZ_ASSERT(temp == InvalidReg);
  rotateRight64(count, src, dest);
}
    573 
    574 // ===============================================================
    575 // Condition functions
    576 
void MacroAssembler::cmp64Set(Condition cond, Register64 lhs, Register64 rhs,
                              Register dest) {
  // On x64 a 64-bit compare-and-set is just the pointer-width version.
  cmpPtrSet(cond, lhs.reg, rhs.reg, dest);
}

void MacroAssembler::cmp64Set(Condition cond, Register64 lhs, Imm64 rhs,
                              Register dest) {
  cmpPtrSet(cond, lhs.reg, ImmWord(static_cast<uintptr_t>(rhs.value)), dest);
}

void MacroAssembler::cmp64Set(Condition cond, Address lhs, Register64 rhs,
                              Register dest) {
  cmpPtrSet(cond, lhs, rhs.reg, dest);
}

void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
                              Register dest) {
  cmpPtrSet(cond, lhs, ImmWord(static_cast<uintptr_t>(rhs.value)), dest);
}

// Compare |lhs| against |rhs| and materialize the boolean result of |cond|
// in |dest|.
template <typename T1, typename T2>
void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
  // NOTE(review): presumably this pre-zeroes |dest| when doing so cannot
  // clobber the comparison inputs, letting emitSet skip a widening step
  // after setcc — confirm against maybeEmitSetZeroByteRegister.
  bool destIsZero = maybeEmitSetZeroByteRegister(lhs, rhs, dest);
  cmpPtr(lhs, rhs);
  emitSet(cond, dest, destIsZero);
}
    603 
    604 // ===============================================================
    605 // Bit counting functions
    606 
void MacroAssembler::clz64(Register64 src, Register64 dest) {
  // lzcnt computes count-leading-zeros directly (defined even for zero).
  if (AssemblerX86Shared::HasLZCNT()) {
    lzcntq(src.reg, dest.reg);
    return;
  }

  // Fallback: bsr yields the index of the highest set bit but leaves dest
  // undefined (and ZF set) for zero input. For nonzero input,
  // clz == 63 - index == index ^ 0x3F (no borrows since 0x3F is all ones).
  // For zero input we load 0x7F, so 0x7F ^ 0x3F == 64.
  Label nonzero;
  bsrq(src.reg, dest.reg);
  j(Assembler::NonZero, &nonzero);
  movq(ImmWord(0x7F), dest.reg);
  bind(&nonzero);
  xorq(Imm32(0x3F), dest.reg);
}

void MacroAssembler::ctz64(Register64 src, Register64 dest) {
  // tzcnt computes count-trailing-zeros directly (defined even for zero).
  if (AssemblerX86Shared::HasBMI1()) {
    tzcntq(src.reg, dest.reg);
    return;
  }

  // Fallback: bsf yields the index of the lowest set bit (== ctz) but is
  // undefined for zero input, so load 64 explicitly in that case.
  Label nonzero;
  bsfq(src.reg, dest.reg);
  j(Assembler::NonZero, &nonzero);
  movq(ImmWord(64), dest.reg);
  bind(&nonzero);
}
    633 
// Population count of |src64| into |dest64|. |tmp| is required (and
// clobbered) only when the POPCNT instruction is unavailable.
void MacroAssembler::popcnt64(Register64 src64, Register64 dest64,
                              Register tmp) {
  Register src = src64.reg;
  Register dest = dest64.reg;

  if (AssemblerX86Shared::HasPOPCNT()) {
    popcntq(src, dest);
    return;
  }

  MOZ_ASSERT(tmp != InvalidReg);

  if (src != dest) {
    movq(src, dest);
  }

  MOZ_ASSERT(tmp != dest);

  ScratchRegisterScope scratch(*this);

  // Equivalent to mozilla::CountPopulation32, adapted for 64 bits.
  // x -= (x >> 1) & m1;
  movq(src, tmp);
  movq(ImmWord(0x5555555555555555), scratch);
  shrq(Imm32(1), tmp);
  andq(scratch, tmp);
  subq(tmp, dest);

  // x = (x & m2) + ((x >> 2) & m2);
  movq(dest, tmp);
  movq(ImmWord(0x3333333333333333), scratch);
  andq(scratch, dest);
  shrq(Imm32(2), tmp);
  andq(scratch, tmp);
  addq(tmp, dest);

  // x = (x + (x >> 4)) & m4;
  movq(dest, tmp);
  movq(ImmWord(0x0f0f0f0f0f0f0f0f), scratch);
  shrq(Imm32(4), tmp);
  addq(tmp, dest);
  andq(scratch, dest);

  // (x * h01) >> 56
  movq(ImmWord(0x0101010101010101), scratch);
  imulq(scratch, dest);
  shrq(Imm32(56), dest);
}
    682 
    683 // ===============================================================
    684 // Branch functions
    685 
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                              Register rhs, Label* label) {
  // Absolute addresses that fit a 32-bit displacement are encoded directly;
  // otherwise the pointer is loaded into the scratch register first.
  if (X86Encoding::IsAddressImmediate(lhs.addr)) {
    branch32(cond, Operand(lhs), rhs, label);
  } else {
    ScratchRegisterScope scratch(*this);
    mov(ImmPtr(lhs.addr), scratch);
    branch32(cond, Address(scratch, 0), rhs, label);
  }
}
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                              Imm32 rhs, Label* label) {
  if (X86Encoding::IsAddressImmediate(lhs.addr)) {
    branch32(cond, Operand(lhs), rhs, label);
  } else {
    ScratchRegisterScope scratch(*this);
    mov(ImmPtr(lhs.addr), scratch);
    branch32(cond, Address(scratch, 0), rhs, label);
  }
}

void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
                              Imm32 rhs, Label* label) {
  // Symbolic addresses are always materialized via the scratch register.
  ScratchRegisterScope scratch(*this);
  mov(lhs, scratch);
  branch32(cond, Address(scratch, 0), rhs, label);
}
    713 
void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
                              Label* success, Label* fail) {
  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
                 cond == Assembler::LessThan ||
                 cond == Assembler::LessThanOrEqual ||
                 cond == Assembler::GreaterThan ||
                 cond == Assembler::GreaterThanOrEqual ||
                 cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
                 cond == Assembler::Above || cond == Assembler::AboveOrEqual,
             "other condition codes not supported");

  // On x64 a 64-bit branch is a single pointer-width compare-and-branch;
  // |fail| is an optional fall-through target.
  branchPtr(cond, lhs.reg, ImmWord(val.value), success);
  if (fail) {
    jump(fail);
  }
}

void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
                              Label* success, Label* fail) {
  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
                 cond == Assembler::LessThan ||
                 cond == Assembler::LessThanOrEqual ||
                 cond == Assembler::GreaterThan ||
                 cond == Assembler::GreaterThanOrEqual ||
                 cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
                 cond == Assembler::Above || cond == Assembler::AboveOrEqual,
             "other condition codes not supported");

  branchPtr(cond, lhs.reg, rhs.reg, success);
  if (fail) {
    jump(fail);
  }
}

void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
                              Label* success, Label* fail) {
  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
                 cond == Assembler::LessThan ||
                 cond == Assembler::LessThanOrEqual ||
                 cond == Assembler::GreaterThan ||
                 cond == Assembler::GreaterThanOrEqual ||
                 cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
                 cond == Assembler::Above || cond == Assembler::AboveOrEqual,
             "other condition codes not supported");

  branchPtr(cond, lhs, ImmWord(val.value), success);
  if (fail) {
    jump(fail);
  }
}

void MacroAssembler::branch64(Condition cond, const Address& lhs,
                              Register64 rhs, Label* success, Label* fail) {
  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
                 cond == Assembler::LessThan ||
                 cond == Assembler::LessThanOrEqual ||
                 cond == Assembler::GreaterThan ||
                 cond == Assembler::GreaterThanOrEqual ||
                 cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
                 cond == Assembler::Above || cond == Assembler::AboveOrEqual,
             "other condition codes not supported");

  branchPtr(cond, lhs, rhs.reg, success);
  if (fail) {
    jump(fail);
  }
}

void MacroAssembler::branch64(Condition cond, const Address& lhs,
                              const Address& rhs, Register scratch,
                              Label* label) {
  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
             "other condition codes not supported");
  // |scratch| must not alias either base register, since it is loaded with
  // the rhs value before the compare.
  MOZ_ASSERT(lhs.base != scratch);
  MOZ_ASSERT(rhs.base != scratch);

  loadPtr(rhs, scratch);
  branchPtr(cond, lhs, scratch, label);
}
    793 
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                               Register rhs, Label* label) {
  ScratchRegisterScope scratch(*this);
  MOZ_ASSERT(rhs != scratch);
  // Addresses that fit a 32-bit displacement are encoded directly;
  // otherwise the pointer is loaded into the scratch register first.
  if (X86Encoding::IsAddressImmediate(lhs.addr)) {
    branchPtrImpl(cond, Operand(lhs), rhs, label);
  } else {
    mov(ImmPtr(lhs.addr), scratch);
    branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
  }
}

void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                               ImmWord rhs, Label* label) {
  if (X86Encoding::IsAddressImmediate(lhs.addr)) {
    branchPtrImpl(cond, Operand(lhs), rhs, label);
  } else {
    ScratchRegisterScope scratch(*this);
    mov(ImmPtr(lhs.addr), scratch);
    branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
  }
}

void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
                               Register rhs, Label* label) {
  // Symbolic addresses are always materialized via the scratch register.
  ScratchRegisterScope scratch(*this);
  MOZ_ASSERT(rhs != scratch);
  mov(lhs, scratch);
  branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
}

void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
                                      Register rhs, Label* label) {
  // On x64 this reduces to an ordinary pointer compare-and-branch.
  branchPtr(cond, lhs, rhs, label);
}
    829 
    830 void MacroAssembler::branchTruncateFloat32ToPtr(FloatRegister src,
    831                                                Register dest, Label* fail) {
    832  vcvttss2sq(src, dest);
    833 
    834  // Same trick as for Doubles
    835  cmpPtr(dest, Imm32(1));
    836  j(Assembler::Overflow, fail);
    837 }
    838 
    839 void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
    840                                                         Register dest,
    841                                                         Label* fail) {
    842  // Infallible operation on x64.
    843  truncateFloat32ModUint32(src, dest);
    844 }
    845 
    846 void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
    847                                                  Register dest, Label* fail) {
    848  branchTruncateFloat32ToPtr(src, dest, fail);
    849 
    850  // Check that the result is in the int32_t range.
    851  ScratchRegisterScope scratch(*this);
    852  move32SignExtendToPtr(dest, scratch);
    853  cmpPtr(dest, scratch);
    854  j(Assembler::NotEqual, fail);
    855 
    856  movl(dest, dest);  // Zero upper 32-bits.
    857 }
    858 
// Truncate |src| (double) to a 64-bit signed integer in |dest|, branching
// to |fail| on NaN or out-of-range inputs.
void MacroAssembler::branchTruncateDoubleToPtr(FloatRegister src, Register dest,
                                               Label* fail) {
  vcvttsd2sq(src, dest);

  // vcvttsd2sq returns 0x8000000000000000 on failure. Test for it by
  // subtracting 1 and testing overflow (this avoids the need to
  // materialize that value in a register).
  cmpPtr(dest, Imm32(1));
  j(Assembler::Overflow, fail);
}
    869 
// Truncate |src| (double) to int32 with modulo 2^32 semantics for values in
// the int64 range; |fail| is taken only when the 64-bit truncation itself
// fails (NaN or outside int64 range).
void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
                                                        Register dest,
                                                        Label* fail) {
  branchTruncateDoubleToPtr(src, dest, fail);
  movl(dest, dest);  // Zero upper 32-bits.
}
    876 
// Truncate |src| (double) to int32 in |dest|, branching to |fail| on NaN,
// overflow, or any result outside the int32 range.
void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
                                                 Register dest, Label* fail) {
  branchTruncateDoubleToPtr(src, dest, fail);

  // Check that the result is in the int32_t range: sign-extend the low 32
  // bits and compare with the full 64-bit result; they differ iff the value
  // does not fit in int32.
  ScratchRegisterScope scratch(*this);
  move32SignExtendToPtr(dest, scratch);
  cmpPtr(dest, scratch);
  j(Assembler::NotEqual, fail);

  movl(dest, dest);  // Zero upper 32-bits.
}
    889 
// Branch if a signed int64 does not fit in a pointer. Never taken here:
// int64 and intptr are the same width on 64-bit platforms.
void MacroAssembler::branchInt64NotInPtrRange(Register64 src, Label* label) {
  // No-op on 64-bit platforms.
}
    893 
// Branch if an unsigned int64 does not fit in a signed intptr, i.e. if its
// sign bit is set (value > INT64_MAX).
void MacroAssembler::branchUInt64NotInPtrRange(Register64 src, Label* label) {
  branchTest64(Assembler::Signed, src, src, label);
}
    897 
// Bit-test the 32-bit word at an absolute address against |rhs| and branch
// on |cond|.
void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
                                  Imm32 rhs, Label* label) {
  if (X86Encoding::IsAddressImmediate(lhs.addr)) {
    // The address fits in a 32-bit displacement; test memory directly.
    test32(Operand(lhs), rhs);
  } else {
    // Otherwise materialize the 64-bit address in the scratch register and
    // test through it.
    ScratchRegisterScope scratch(*this);
    mov(ImmPtr(lhs.addr), scratch);
    test32(Operand(scratch, 0), rhs);
  }
  j(cond, label);
}
    909 
// Bit-test |lhs| against the immediate word |rhs| and branch on |cond|.
void MacroAssembler::branchTestPtr(Condition cond, Register lhs, ImmWord rhs,
                                   Label* label) {
  testPtr(lhs, rhs);
  j(cond, label);
}
    915 
// 64-bit bit-test with success/failure labels. |temp| is unused on x64: a
// single 64-bit test instruction suffices.
void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
                                  Register64 rhs, Register temp, Label* success,
                                  Label* fail) {
  branchTestPtr(cond, lhs.reg, rhs.reg, success);
  if (fail) {
    jump(fail);
  }
}
    924 
// 64-bit bit-test against an immediate, with optional fall-through-to-fail.
void MacroAssembler::branchTest64(Condition cond, Register64 lhs, Imm64 rhs,
                                  Label* success, Label* fail) {
  branchTestPtr(cond, lhs.reg, ImmWord(rhs.value), success);
  if (fail) {
    jump(fail);
  }
}
    932 
// Branch on the truthiness of a boxed boolean. A 32-bit self-test reads only
// the low half of the value register, which holds the boolean payload (0 or
// 1), ignoring the tag bits in the upper half.
void MacroAssembler::branchTestBooleanTruthy(bool truthy,
                                             const ValueOperand& value,
                                             Label* label) {
  test32(value.valueReg(), value.valueReg());
  j(truthy ? NonZero : Zero, label);
}
    939 
// Branch if the Value at |valaddr| is (or is not) the magic value for |why|,
// comparing all 64 bits of the boxed representation at once.
void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
                                     JSWhyMagic why, Label* label) {
  uint64_t magic = MagicValue(why).asRawBits();
  cmpPtr(valaddr, ImmWord(magic));
  j(cond, label);
}
    946 
// Branch on (in)equality of a boxed Value in memory (|lhs|) against one in a
// register; a single 64-bit pointer compare covers tag and payload together.
template <typename T>
void MacroAssembler::branchTestValue(Condition cond, const T& lhs,
                                     const ValueOperand& rhs, Label* label) {
  // Only exact equality/inequality is meaningful on raw value bits.
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
  branchPtr(cond, lhs, rhs.valueReg(), label);
}
    953 
// Indirect jump through a code pointer loaded from |address| (e.g. a jump
// table entry).
void MacroAssembler::branchToComputedAddress(const BaseIndex& address) {
  jmp(Operand(address));
}
    957 
// Compare |lhs| with an immediate and conditionally move |src| into |dest|
// (branchless select).
void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Imm32 rhs,
                                   Register src, Register dest) {
  cmpPtr(lhs, rhs);
  cmovCCq(cond, src, dest);
}
    963 
// Compare two registers and conditionally move |src| into |dest|
// (branchless select).
void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
                                   Register src, Register dest) {
  cmpPtr(lhs, rhs);
  cmovCCq(cond, src, dest);
}
    969 
// Compare |lhs| with the word at |rhs| and conditionally move |src| into
// |dest| (branchless select).
void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
                                   const Address& rhs, Register src,
                                   Register dest) {
  cmpPtr(lhs, Operand(rhs));
  cmovCCq(cond, src, dest);
}
    976 
// 32-bit compare, but the conditional move transfers the full 64-bit
// pointer from |src| to |dest|.
void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
                                  Register src, Register dest) {
  cmp32(lhs, rhs);
  cmovCCq(cond, Operand(src), dest);
}
    982 
// 32-bit compare of the word at |lhs| against |rhs|; if |cond| holds,
// conditionally load the pointer at |src| into |dest| (cmov from memory).
void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
                                  const Address& src, Register dest) {
  cmp32(lhs, rhs);
  cmovCCq(cond, Operand(src), dest);
}
    988 
// Bit-test the 32-bit word at |addr| against |mask|; if |cond| holds,
// conditionally load the pointer at |src| into |dest|.
void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
                                   Imm32 mask, const Address& src,
                                   Register dest) {
  // Only the zero flag is meaningful after a test instruction.
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(addr, mask);
  cmovCCq(cond, Operand(src), dest);
}
    996 
// Bit-test |operand| against |mask|; if |cond| holds, conditionally move
// the pointer in |src| to |dest|.
void MacroAssembler::test32MovePtr(Condition cond, Register operand, Imm32 mask,
                                   Register src, Register dest) {
  // Only the zero flag is meaningful after a test instruction.
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(operand, mask);
  cmovCCq(cond, Operand(src), dest);
}
   1003 
// Bit-test the 32-bit word at |addr| against |mask|; if |cond| holds,
// conditionally move the pointer in |src| to |dest|.
void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
                                   Imm32 mask, Register src, Register dest) {
  // Only the zero flag is meaningful after a test instruction.
  MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
  test32(addr, mask);
  cmovCCq(cond, Operand(src), dest);
}
   1010 
// Conditionally move |src| into |dest| based on already-set flags. Used for
// Spectre mitigations: cmov is not speculated, unlike a branch.
void MacroAssembler::spectreMovePtr(Condition cond, Register src,
                                    Register dest) {
  cmovCCq(cond, Operand(src), dest);
}
   1015 
// 32-bit bounds check with Spectre index masking: branch to |failure| when
// |index| >= |length| (unsigned compare, so negative indices also fail), and
// when masking is enabled, zero |index| on the failing path with a cmov so
// speculative execution past the branch cannot use an out-of-bounds index.
// |maybeScratch| is unused on x64; the reserved scratch register is used
// instead.
void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(length != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  ScratchRegisterScope scratch(*this);
  MOZ_ASSERT(index != scratch);
  MOZ_ASSERT(length != scratch);

  if (JitOptions.spectreIndexMasking) {
    // Prepare the zero before the compare: move32 must not clobber flags
    // that the cmov below depends on.
    move32(Imm32(0), scratch);
  }

  cmp32(index, length);
  j(Assembler::AboveOrEqual, failure);

  if (JitOptions.spectreIndexMasking) {
    cmovCCl(Assembler::AboveOrEqual, scratch, index);
  }
}
   1037 
// As the register-length variant above, but with the length loaded from
// memory: branch to |failure| when |index| >= |length| (unsigned), and when
// Spectre index masking is enabled, zero |index| on the failing path with a
// cmov so speculation cannot use an out-of-bounds index. |maybeScratch| is
// unused on x64; the reserved scratch register is used instead.
void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
                                          Register maybeScratch,
                                          Label* failure) {
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  ScratchRegisterScope scratch(*this);
  MOZ_ASSERT(index != scratch);
  MOZ_ASSERT(length.base != scratch);

  if (JitOptions.spectreIndexMasking) {
    // Prepare the zero before the compare so the cmov's flags are intact.
    move32(Imm32(0), scratch);
  }

  cmp32(index, Operand(length));
  j(Assembler::AboveOrEqual, failure);

  if (JitOptions.spectreIndexMasking) {
    cmovCCl(Assembler::AboveOrEqual, scratch, index);
  }
}
   1060 
// Pointer-width bounds check with Spectre index masking: branch to |failure|
// when |index| >= |length| (unsigned), and when masking is enabled, zero
// |index| on the failing path with a cmov so speculative execution past the
// branch cannot use an out-of-bounds index. |maybeScratch| is unused on x64;
// the reserved scratch register is used instead.
void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
                                           Register maybeScratch,
                                           Label* failure) {
  MOZ_ASSERT(length != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  ScratchRegisterScope scratch(*this);
  MOZ_ASSERT(index != scratch);
  MOZ_ASSERT(length != scratch);

  if (JitOptions.spectreIndexMasking) {
    // Prepare the zero before the compare so the cmov's flags are intact.
    movePtr(ImmWord(0), scratch);
  }

  cmpPtr(index, length);
  j(Assembler::AboveOrEqual, failure);

  if (JitOptions.spectreIndexMasking) {
    cmovCCq(Assembler::AboveOrEqual, scratch, index);
  }
}
   1082 
// As the register-length variant above, but with the length loaded from
// memory: branch to |failure| when |index| >= |length| (unsigned), and when
// Spectre index masking is enabled, zero |index| on the failing path with a
// cmov so speculation cannot use an out-of-bounds index. |maybeScratch| is
// unused on x64; the reserved scratch register is used instead.
void MacroAssembler::spectreBoundsCheckPtr(Register index,
                                           const Address& length,
                                           Register maybeScratch,
                                           Label* failure) {
  MOZ_ASSERT(index != length.base);
  MOZ_ASSERT(length.base != maybeScratch);
  MOZ_ASSERT(index != maybeScratch);

  ScratchRegisterScope scratch(*this);
  MOZ_ASSERT(index != scratch);
  MOZ_ASSERT(length.base != scratch);

  if (JitOptions.spectreIndexMasking) {
    // Prepare the zero before the compare so the cmov's flags are intact.
    movePtr(ImmWord(0), scratch);
  }

  cmpPtr(index, Operand(length));
  j(Assembler::AboveOrEqual, failure);

  if (JitOptions.spectreIndexMasking) {
    cmovCCq(Assembler::AboveOrEqual, scratch, index);
  }
}
   1106 
   1107 // ========================================================================
   1108 // SIMD.
   1109 
   1110 // Extract lane as scalar
   1111 
// Extract 64-bit lane |lane| of |src| into a GPR. Lane 0 uses vmovq, which
// needs no immediate operand; other lanes use vpextrq with the lane index.
void MacroAssembler::extractLaneInt64x2(uint32_t lane, FloatRegister src,
                                        Register64 dest) {
  if (lane == 0) {
    vmovq(src, dest.reg);
  } else {
    vpextrq(lane, src, dest.reg);
  }
}
   1120 
   1121 // Replace lane value
   1122 
// In-place variant: overwrite 64-bit lane |lane| of |lhsDest| with |rhs|.
void MacroAssembler::replaceLaneInt64x2(unsigned lane, Register64 rhs,
                                        FloatRegister lhsDest) {
  vpinsrq(lane, rhs.reg, lhsDest, lhsDest);
}
   1127 
// Three-operand variant: |dest| receives |lhs| with 64-bit lane |lane|
// replaced by |rhs|.
void MacroAssembler::replaceLaneInt64x2(unsigned lane, FloatRegister lhs,
                                        Register64 rhs, FloatRegister dest) {
  vpinsrq(lane, rhs.reg, lhs, dest);
}
   1132 
   1133 // Splat
   1134 
// Splat a 64-bit GPR into both lanes of |dest|: move it into the low lane,
// then duplicate it, using vpbroadcastq on AVX2 or an unpack of the low
// qwords otherwise.
void MacroAssembler::splatX2(Register64 src, FloatRegister dest) {
  vmovq(src.reg, dest);
  if (HasAVX2()) {
    vbroadcastq(Operand(dest), dest);
  } else {
    vpunpcklqdq(dest, dest, dest);
  }
}
   1143 
   1144 // ========================================================================
   1145 // Truncate floating point.
   1146 
   1147 void MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest,
   1148                                             Register temp,
   1149                                             FloatRegister floatTemp) {
   1150  Label done;
   1151 
   1152  loadFloat32(src, floatTemp);
   1153 
   1154  truncateFloat32ToInt64(src, dest, temp);
   1155 
   1156  // For unsigned conversion the case of [INT64, UINT64] needs to get handled
   1157  // separately.
   1158  loadPtr(dest, temp);
   1159  branchPtr(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
   1160 
   1161  // Move the value inside INT64 range.
   1162  storeFloat32(floatTemp, dest);
   1163  loadConstantFloat32(double(int64_t(0x8000000000000000)), floatTemp);
   1164  vaddss(Operand(dest), floatTemp, floatTemp);
   1165  storeFloat32(floatTemp, dest);
   1166  truncateFloat32ToInt64(dest, dest, temp);
   1167 
   1168  loadPtr(dest, temp);
   1169  or64(Imm64(0x8000000000000000), Register64(temp));
   1170  storePtr(temp, dest);
   1171 
   1172  bind(&done);
   1173 }
   1174 
// Truncate the double at |src| to a uint64 stored at |dest|, using |temp|
// (GPR) and |floatTemp| (FP) as scratch. x64 only has a signed
// double->int64 truncation, so values in (INT64_MAX, UINT64_MAX] are
// handled by biasing the input down by 2^63, truncating, and restoring the
// high bit.
void MacroAssembler::truncateDoubleToUInt64(Address src, Address dest,
                                            Register temp,
                                            FloatRegister floatTemp) {
  Label done;

  loadDouble(src, floatTemp);

  truncateDoubleToInt64(src, dest, temp);

  // For unsigned conversion the case of [INT64, UINT64] needs to get handled
  // separately: if the signed truncation produced a non-negative result, the
  // value already fits and we are done.
  loadPtr(dest, temp);
  branchPtr(Assembler::Condition::NotSigned, temp, Imm32(0), &done);

  // Move the value inside INT64 range by adding double(INT64_MIN) (= -2^63,
  // exactly representable), truncate again, then set the high bit back.
  storeDouble(floatTemp, dest);
  loadConstantDouble(double(int64_t(0x8000000000000000)), floatTemp);
  vaddsd(Operand(dest), floatTemp, floatTemp);
  storeDouble(floatTemp, dest);
  truncateDoubleToInt64(dest, dest, temp);

  loadPtr(dest, temp);
  or64(Imm64(0x8000000000000000), Register64(temp));
  storePtr(temp, dest);

  bind(&done);
}
   1202 
// Unbox a pointer-typed Value in |src| into |dest|, jumping to |fail| when
// the value's tag is not |type|. XOR-ing with the expected shifted tag
// cancels the tag bits exactly when the tag matches, leaving only the
// payload; any bits remaining above JSVAL_TAG_SHIFT signal a mismatch.
void MacroAssemblerX64::fallibleUnboxPtrImpl(const Operand& src, Register dest,
                                             JSValueType type, Label* fail) {
  MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
             type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
  // dest := src XOR mask
  // scratch := dest >> JSVAL_TAG_SHIFT
  // fail if scratch != 0
  //
  // Note: src and dest can be the same register.
  ScratchRegisterScope scratch(asMasm());
  mov(ImmShiftedTag(type), scratch);
  xorq(src, scratch);
  // Copy the unboxed payload to |dest| before the shift destroys it in
  // scratch; on failure |dest| holds garbage, which is fine since we jump.
  mov(scratch, dest);
  shrq(Imm32(JSVAL_TAG_SHIFT), scratch);
  j(Assembler::NonZero, fail);
}
   1219 
// Fallible unbox from a Value held in a register.
void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
                                      JSValueType type, Label* fail) {
  fallibleUnboxPtrImpl(Operand(src.valueReg()), dest, type, fail);
}
   1224 
// Fallible unbox from a Value at a base+offset address.
void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
                                      JSValueType type, Label* fail) {
  fallibleUnboxPtrImpl(Operand(src), dest, type, fail);
}
   1229 
// Fallible unbox from a Value at a base+index*scale+offset address.
void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
                                      JSValueType type, Label* fail) {
  fallibleUnboxPtrImpl(Operand(src), dest, type, fail);
}
   1234 
// dest := min(lhs, rhs), 32-bit.
void MacroAssembler::min32(Register lhs, Register rhs, Register dest) {
  minMax32(lhs, rhs, dest, /* isMax = */ false);
}
   1238 
// dest := min(lhs, rhs), 32-bit, immediate rhs.
void MacroAssembler::min32(Register lhs, Imm32 rhs, Register dest) {
  minMax32(lhs, rhs, dest, /* isMax = */ false);
}
   1242 
// dest := max(lhs, rhs), 32-bit.
void MacroAssembler::max32(Register lhs, Register rhs, Register dest) {
  minMax32(lhs, rhs, dest, /* isMax = */ true);
}
   1246 
// dest := max(lhs, rhs), 32-bit, immediate rhs.
void MacroAssembler::max32(Register lhs, Imm32 rhs, Register dest) {
  minMax32(lhs, rhs, dest, /* isMax = */ true);
}
   1250 
// dest := min(lhs, rhs), pointer-width.
void MacroAssembler::minPtr(Register lhs, Register rhs, Register dest) {
  minMaxPtr(lhs, rhs, dest, /* isMax = */ false);
}
   1254 
// dest := min(lhs, rhs), pointer-width, immediate rhs.
void MacroAssembler::minPtr(Register lhs, ImmWord rhs, Register dest) {
  minMaxPtr(lhs, rhs, dest, /* isMax = */ false);
}
   1258 
// dest := max(lhs, rhs), pointer-width.
void MacroAssembler::maxPtr(Register lhs, Register rhs, Register dest) {
  minMaxPtr(lhs, rhs, dest, /* isMax = */ true);
}
   1262 
// dest := max(lhs, rhs), pointer-width, immediate rhs.
void MacroAssembler::maxPtr(Register lhs, ImmWord rhs, Register dest) {
  minMaxPtr(lhs, rhs, dest, /* isMax = */ true);
}
   1266 
   1267 //}}} check_macroassembler_style
   1268 // ===============================================================
   1269 
// Increment the int32 payload of the boxed Value at |addr| by adding 1 to
// the full 64-bit word. NOTE(review): this assumes the payload does not
// overflow int32 — a carry would flow into the tag bits; presumably callers
// guarantee the value stays in range (TODO confirm).
void MacroAssemblerX64::incrementInt32Value(const Address& addr) {
  asMasm().addPtr(Imm32(1), addr);
}
   1273 
// Unbox |src| into |dest|. For a float destination the value may be either
// an Int32 (converted to double) or a Double (unboxed directly); for a GPR
// destination it is unboxed as the non-double |type|.
void MacroAssemblerX64::unboxValue(const ValueOperand& src, AnyRegister dest,
                                   JSValueType type) {
  if (dest.isFloat()) {
    Label notInt32, end;
    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
    // Int32 payload occupies the low 32 bits of the value register, which
    // is what convertInt32ToDouble reads.
    convertInt32ToDouble(src.valueReg(), dest.fpu());
    jump(&end);
    bind(&notInt32);
    unboxDouble(src, dest.fpu());
    bind(&end);
  } else {
    unboxNonDouble(src, dest.gpr(), type);
  }
}
   1288 
// Load a numeric Value from |src| into the float register |dest|: an Int32
// is converted to double, a Double is unboxed directly. |src| is any
// addressing form accepted by branchTestInt32/convertInt32ToDouble.
template <typename T>
void MacroAssemblerX64::loadInt32OrDouble(const T& src, FloatRegister dest) {
  Label notInt32, end;
  asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
  convertInt32ToDouble(src, dest);
  jump(&end);
  bind(&notInt32);
  unboxDouble(src, dest);
  bind(&end);
}
   1299 
   1300 }  // namespace jit
   1301 }  // namespace js
   1302 
   1303 #endif /* jit_x64_MacroAssembler_x64_inl_h */