tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

MacroAssembler-x64.cpp (77005B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "jit/x64/MacroAssembler-x64.h"
      8 
      9 #include "jit/BaselineFrame.h"
     10 #include "jit/JitFrames.h"
     11 #include "jit/JitRuntime.h"
     12 #include "jit/MacroAssembler.h"
     13 #include "jit/MoveEmitter.h"
     14 #include "util/Memory.h"
     15 #include "vm/BigIntType.h"
     16 #include "vm/JitActivation.h"  // js::jit::JitActivation
     17 #include "vm/JSContext.h"
     18 #include "vm/StringType.h"
     19 #include "wasm/WasmStubs.h"
     20 
     21 #include "jit/MacroAssembler-inl.h"
     22 
     23 using namespace js;
     24 using namespace js::jit;
     25 
     26 void MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest) {
     27  if (maybeInlineDouble(d, dest)) {
     28    return;
     29  }
     30  Double* dbl = getDouble(d);
     31  if (!dbl) {
     32    return;
     33  }
     34  // The constants will be stored in a pool appended to the text (see
     35  // finish()), so they will always be a fixed distance from the
     36  // instructions which reference them. This allows the instructions to use
     37  // PC-relative addressing. Use "jump" label support code, because we need
     38  // the same PC-relative address patching that jumps use.
     39  JmpSrc j = masm.vmovsd_ripr(dest.encoding());
     40  propagateOOM(dbl->uses.append(j));
     41 }
     42 
     43 void MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest) {
     44  if (maybeInlineFloat(f, dest)) {
     45    return;
     46  }
     47  Float* flt = getFloat(f);
     48  if (!flt) {
     49    return;
     50  }
     51  // See comment in loadConstantDouble
     52  JmpSrc j = masm.vmovss_ripr(dest.encoding());
     53  propagateOOM(flt->uses.append(j));
     54 }
     55 
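         // Each helper below emits one SSE/AVX instruction whose memory
         // operand is a RIP-relative reference to a 128-bit constant in the
         // pool appended by finish(). The emitted use is recorded as a JmpSrc
         // so bindOffsets() can patch the displacement once the constant's
         // final offset is known (same mechanism as loadConstantDouble above).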
     56 void MacroAssemblerX64::vpRiprOpSimd128(
     57    const SimdConstant& v, FloatRegister reg,
     58    JmpSrc (X86Encoding::BaseAssemblerX64::*op)(
     59        X86Encoding::XMMRegisterID id)) {
     60  SimdData* val = getSimdData(v);
     61  if (!val) {
     62    return;
     63  }
     64  JmpSrc j = (masm.*op)(reg.encoding());
     65  propagateOOM(val->uses.append(j));
     66 }
     67 
     68 void MacroAssemblerX64::vpRiprOpSimd128(
     69    const SimdConstant& v, FloatRegister src, FloatRegister dest,
     70    JmpSrc (X86Encoding::BaseAssemblerX64::*op)(
     71        X86Encoding::XMMRegisterID srcId, X86Encoding::XMMRegisterID destId)) {
     72  SimdData* val = getSimdData(v);
     73  if (!val) {
     74    return;
     75  }
     76  JmpSrc j = (masm.*op)(src.encoding(), dest.encoding());
     77  propagateOOM(val->uses.append(j));
     78 }
     79 
     80 void MacroAssemblerX64::loadConstantSimd128Int(const SimdConstant& v,
     81                                               FloatRegister dest) {
     82  if (maybeInlineSimd128Int(v, dest)) {
     83    return;
     84  }
     85  vpRiprOpSimd128(v, dest, &X86Encoding::BaseAssemblerX64::vmovdqa_ripr);
     86 }
     87 
     88 void MacroAssemblerX64::loadConstantSimd128Float(const SimdConstant& v,
     89                                                 FloatRegister dest) {
     90  if (maybeInlineSimd128Float(v, dest)) {
     91    return;
     92  }
     93  vpRiprOpSimd128(v, dest, &X86Encoding::BaseAssemblerX64::vmovaps_ripr);
     94 }
     95 
     96 void MacroAssemblerX64::vpaddbSimd128(const SimdConstant& v, FloatRegister lhs,
     97                                      FloatRegister dest) {
     98  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddb_ripr);
     99 }
    100 
    101 void MacroAssemblerX64::vpaddwSimd128(const SimdConstant& v, FloatRegister lhs,
    102                                      FloatRegister dest) {
    103  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddw_ripr);
    104 }
    105 
    106 void MacroAssemblerX64::vpadddSimd128(const SimdConstant& v, FloatRegister lhs,
    107                                      FloatRegister dest) {
    108  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddd_ripr);
    109 }
    110 
    111 void MacroAssemblerX64::vpaddqSimd128(const SimdConstant& v, FloatRegister lhs,
    112                                      FloatRegister dest) {
    113  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddq_ripr);
    114 }
    115 
    116 void MacroAssemblerX64::vpsubbSimd128(const SimdConstant& v, FloatRegister lhs,
    117                                      FloatRegister dest) {
    118  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubb_ripr);
    119 }
    120 
    121 void MacroAssemblerX64::vpsubwSimd128(const SimdConstant& v, FloatRegister lhs,
    122                                      FloatRegister dest) {
    123  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubw_ripr);
    124 }
    125 
    126 void MacroAssemblerX64::vpsubdSimd128(const SimdConstant& v, FloatRegister lhs,
    127                                      FloatRegister dest) {
    128  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubd_ripr);
    129 }
    130 
    131 void MacroAssemblerX64::vpsubqSimd128(const SimdConstant& v, FloatRegister lhs,
    132                                      FloatRegister dest) {
    133  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubq_ripr);
    134 }
    135 
    136 void MacroAssemblerX64::vpmullwSimd128(const SimdConstant& v, FloatRegister lhs,
    137                                       FloatRegister dest) {
    138  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmullw_ripr);
    139 }
    140 
    141 void MacroAssemblerX64::vpmulldSimd128(const SimdConstant& v, FloatRegister lhs,
    142                                       FloatRegister dest) {
    143  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmulld_ripr);
    144 }
    145 
    146 void MacroAssemblerX64::vpaddsbSimd128(const SimdConstant& v, FloatRegister lhs,
    147                                       FloatRegister dest) {
    148  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddsb_ripr);
    149 }
    150 
    151 void MacroAssemblerX64::vpaddusbSimd128(const SimdConstant& v,
    152                                        FloatRegister lhs, FloatRegister dest) {
    153  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddusb_ripr);
    154 }
    155 
    156 void MacroAssemblerX64::vpaddswSimd128(const SimdConstant& v, FloatRegister lhs,
    157                                       FloatRegister dest) {
    158  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddsw_ripr);
    159 }
    160 
    161 void MacroAssemblerX64::vpadduswSimd128(const SimdConstant& v,
    162                                        FloatRegister lhs, FloatRegister dest) {
    163  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpaddusw_ripr);
    164 }
    165 
    166 void MacroAssemblerX64::vpsubsbSimd128(const SimdConstant& v, FloatRegister lhs,
    167                                       FloatRegister dest) {
    168  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubsb_ripr);
    169 }
    170 
    171 void MacroAssemblerX64::vpsubusbSimd128(const SimdConstant& v,
    172                                        FloatRegister lhs, FloatRegister dest) {
    173  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubusb_ripr);
    174 }
    175 
    176 void MacroAssemblerX64::vpsubswSimd128(const SimdConstant& v, FloatRegister lhs,
    177                                       FloatRegister dest) {
    178  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubsw_ripr);
    179 }
    180 
    181 void MacroAssemblerX64::vpsubuswSimd128(const SimdConstant& v,
    182                                        FloatRegister lhs, FloatRegister dest) {
    183  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpsubusw_ripr);
    184 }
    185 
    186 void MacroAssemblerX64::vpminsbSimd128(const SimdConstant& v, FloatRegister lhs,
    187                                       FloatRegister dest) {
    188  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminsb_ripr);
    189 }
    190 
    191 void MacroAssemblerX64::vpminubSimd128(const SimdConstant& v, FloatRegister lhs,
    192                                       FloatRegister dest) {
    193  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminub_ripr);
    194 }
    195 
    196 void MacroAssemblerX64::vpminswSimd128(const SimdConstant& v, FloatRegister lhs,
    197                                       FloatRegister dest) {
    198  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminsw_ripr);
    199 }
    200 
    201 void MacroAssemblerX64::vpminuwSimd128(const SimdConstant& v, FloatRegister lhs,
    202                                       FloatRegister dest) {
    203  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminuw_ripr);
    204 }
    205 
    206 void MacroAssemblerX64::vpminsdSimd128(const SimdConstant& v, FloatRegister lhs,
    207                                       FloatRegister dest) {
    208  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminsd_ripr);
    209 }
    210 
    211 void MacroAssemblerX64::vpminudSimd128(const SimdConstant& v, FloatRegister lhs,
    212                                       FloatRegister dest) {
    213  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpminud_ripr);
    214 }
    215 
    216 void MacroAssemblerX64::vpmaxsbSimd128(const SimdConstant& v, FloatRegister lhs,
    217                                       FloatRegister dest) {
    218  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxsb_ripr);
    219 }
    220 
    221 void MacroAssemblerX64::vpmaxubSimd128(const SimdConstant& v, FloatRegister lhs,
    222                                       FloatRegister dest) {
    223  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxub_ripr);
    224 }
    225 
    226 void MacroAssemblerX64::vpmaxswSimd128(const SimdConstant& v, FloatRegister lhs,
    227                                       FloatRegister dest) {
    228  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxsw_ripr);
    229 }
    230 
    231 void MacroAssemblerX64::vpmaxuwSimd128(const SimdConstant& v, FloatRegister lhs,
    232                                       FloatRegister dest) {
    233  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxuw_ripr);
    234 }
    235 
    236 void MacroAssemblerX64::vpmaxsdSimd128(const SimdConstant& v, FloatRegister lhs,
    237                                       FloatRegister dest) {
    238  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxsd_ripr);
    239 }
    240 
    241 void MacroAssemblerX64::vpmaxudSimd128(const SimdConstant& v, FloatRegister lhs,
    242                                       FloatRegister dest) {
    243  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaxud_ripr);
    244 }
    245 
    246 void MacroAssemblerX64::vpandSimd128(const SimdConstant& v, FloatRegister lhs,
    247                                     FloatRegister dest) {
    248  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpand_ripr);
    249 }
    250 
    251 void MacroAssemblerX64::vpxorSimd128(const SimdConstant& v, FloatRegister lhs,
    252                                     FloatRegister dest) {
    253  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpxor_ripr);
    254 }
    255 
    256 void MacroAssemblerX64::vporSimd128(const SimdConstant& v, FloatRegister lhs,
    257                                    FloatRegister dest) {
    258  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpor_ripr);
    259 }
    260 
    261 void MacroAssemblerX64::vaddpsSimd128(const SimdConstant& v, FloatRegister lhs,
    262                                      FloatRegister dest) {
    263  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vaddps_ripr);
    264 }
    265 
    266 void MacroAssemblerX64::vaddpdSimd128(const SimdConstant& v, FloatRegister lhs,
    267                                      FloatRegister dest) {
    268  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vaddpd_ripr);
    269 }
    270 
    271 void MacroAssemblerX64::vsubpsSimd128(const SimdConstant& v, FloatRegister lhs,
    272                                      FloatRegister dest) {
    273  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vsubps_ripr);
    274 }
    275 
    276 void MacroAssemblerX64::vsubpdSimd128(const SimdConstant& v, FloatRegister lhs,
    277                                      FloatRegister dest) {
    278  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vsubpd_ripr);
    279 }
    280 
    281 void MacroAssemblerX64::vdivpsSimd128(const SimdConstant& v, FloatRegister lhs,
    282                                      FloatRegister dest) {
    283  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vdivps_ripr);
    284 }
    285 
    286 void MacroAssemblerX64::vdivpdSimd128(const SimdConstant& v, FloatRegister lhs,
    287                                      FloatRegister dest) {
    288  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vdivpd_ripr);
    289 }
    290 
    291 void MacroAssemblerX64::vmulpsSimd128(const SimdConstant& v, FloatRegister lhs,
    292                                      FloatRegister dest) {
    293  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vmulps_ripr);
    294 }
    295 
    296 void MacroAssemblerX64::vmulpdSimd128(const SimdConstant& v, FloatRegister lhs,
    297                                      FloatRegister dest) {
    298  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vmulpd_ripr);
    299 }
    300 
    301 void MacroAssemblerX64::vandpsSimd128(const SimdConstant& v, FloatRegister lhs,
    302                                      FloatRegister dest) {
    303  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vandps_ripr);
    304 }
    305 
    306 void MacroAssemblerX64::vandpdSimd128(const SimdConstant& v, FloatRegister lhs,
    307                                      FloatRegister dest) {
    308  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vandpd_ripr);
    309 }
    310 
    311 void MacroAssemblerX64::vxorpsSimd128(const SimdConstant& v, FloatRegister lhs,
    312                                      FloatRegister dest) {
    313  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vxorps_ripr);
    314 }
    315 
    316 void MacroAssemblerX64::vxorpdSimd128(const SimdConstant& v, FloatRegister lhs,
    317                                      FloatRegister dest) {
    318  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vxorpd_ripr);
    319 }
    320 
    321 void MacroAssemblerX64::vminpdSimd128(const SimdConstant& v, FloatRegister lhs,
    322                                      FloatRegister dest) {
    323  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vminpd_ripr);
    324 }
    325 
    326 void MacroAssemblerX64::vpacksswbSimd128(const SimdConstant& v,
    327                                         FloatRegister lhs,
    328                                         FloatRegister dest) {
    329  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpacksswb_ripr);
    330 }
    331 
    332 void MacroAssemblerX64::vpackuswbSimd128(const SimdConstant& v,
    333                                         FloatRegister lhs,
    334                                         FloatRegister dest) {
    335  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpackuswb_ripr);
    336 }
    337 
    338 void MacroAssemblerX64::vpackssdwSimd128(const SimdConstant& v,
    339                                         FloatRegister lhs,
    340                                         FloatRegister dest) {
    341  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpackssdw_ripr);
    342 }
    343 
    344 void MacroAssemblerX64::vpackusdwSimd128(const SimdConstant& v,
    345                                         FloatRegister lhs,
    346                                         FloatRegister dest) {
    347  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpackusdw_ripr);
    348 }
    349 
    350 void MacroAssemblerX64::vpunpckldqSimd128(const SimdConstant& v,
    351                                          FloatRegister lhs,
    352                                          FloatRegister dest) {
    353  vpRiprOpSimd128(v, lhs, dest,
    354                  &X86Encoding::BaseAssemblerX64::vpunpckldq_ripr);
    355 }
    356 
    357 void MacroAssemblerX64::vunpcklpsSimd128(const SimdConstant& v,
    358                                         FloatRegister lhs,
    359                                         FloatRegister dest) {
    360  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vunpcklps_ripr);
    361 }
    362 
    363 void MacroAssemblerX64::vpshufbSimd128(const SimdConstant& v, FloatRegister lhs,
    364                                       FloatRegister dest) {
    365  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpshufb_ripr);
    366 }
    367 
    368 void MacroAssemblerX64::vptestSimd128(const SimdConstant& v,
    369                                      FloatRegister lhs) {
    370  vpRiprOpSimd128(v, lhs, &X86Encoding::BaseAssemblerX64::vptest_ripr);
    371 }
    372 
    373 void MacroAssemblerX64::vpmaddwdSimd128(const SimdConstant& v,
    374                                        FloatRegister lhs, FloatRegister dest) {
    375  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmaddwd_ripr);
    376 }
    377 
    378 void MacroAssemblerX64::vpcmpeqbSimd128(const SimdConstant& v,
    379                                        FloatRegister lhs, FloatRegister dest) {
    380  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpeqb_ripr);
    381 }
    382 
    383 void MacroAssemblerX64::vpcmpgtbSimd128(const SimdConstant& v,
    384                                        FloatRegister lhs, FloatRegister dest) {
    385  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpgtb_ripr);
    386 }
    387 
    388 void MacroAssemblerX64::vpcmpeqwSimd128(const SimdConstant& v,
    389                                        FloatRegister lhs, FloatRegister dest) {
    390  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpeqw_ripr);
    391 }
    392 
    393 void MacroAssemblerX64::vpcmpgtwSimd128(const SimdConstant& v,
    394                                        FloatRegister lhs, FloatRegister dest) {
    395  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpgtw_ripr);
    396 }
    397 
    398 void MacroAssemblerX64::vpcmpeqdSimd128(const SimdConstant& v,
    399                                        FloatRegister lhs, FloatRegister dest) {
    400  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpeqd_ripr);
    401 }
    402 
    403 void MacroAssemblerX64::vpcmpgtdSimd128(const SimdConstant& v,
    404                                        FloatRegister lhs, FloatRegister dest) {
    405  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpcmpgtd_ripr);
    406 }
    407 
    408 void MacroAssemblerX64::vcmpeqpsSimd128(const SimdConstant& v,
    409                                        FloatRegister lhs, FloatRegister dest) {
    410  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpeqps_ripr);
    411 }
    412 
    413 void MacroAssemblerX64::vcmpneqpsSimd128(const SimdConstant& v,
    414                                         FloatRegister lhs,
    415                                         FloatRegister dest) {
    416  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpneqps_ripr);
    417 }
    418 
    419 void MacroAssemblerX64::vcmpltpsSimd128(const SimdConstant& v,
    420                                        FloatRegister lhs, FloatRegister dest) {
    421  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpltps_ripr);
    422 }
    423 
    424 void MacroAssemblerX64::vcmplepsSimd128(const SimdConstant& v,
    425                                        FloatRegister lhs, FloatRegister dest) {
    426  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpleps_ripr);
    427 }
    428 
    429 void MacroAssemblerX64::vcmpgepsSimd128(const SimdConstant& v,
    430                                        FloatRegister lhs, FloatRegister dest) {
    431  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpgeps_ripr);
    432 }
    433 
    434 void MacroAssemblerX64::vcmpeqpdSimd128(const SimdConstant& v,
    435                                        FloatRegister lhs, FloatRegister dest) {
    436  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpeqpd_ripr);
    437 }
    438 
    439 void MacroAssemblerX64::vcmpneqpdSimd128(const SimdConstant& v,
    440                                         FloatRegister lhs,
    441                                         FloatRegister dest) {
    442  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpneqpd_ripr);
    443 }
    444 
    445 void MacroAssemblerX64::vcmpltpdSimd128(const SimdConstant& v,
    446                                        FloatRegister lhs, FloatRegister dest) {
    447  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmpltpd_ripr);
    448 }
    449 
    450 void MacroAssemblerX64::vcmplepdSimd128(const SimdConstant& v,
    451                                        FloatRegister lhs, FloatRegister dest) {
    452  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vcmplepd_ripr);
    453 }
    454 
    455 void MacroAssemblerX64::vpmaddubswSimd128(const SimdConstant& v,
    456                                          FloatRegister lhs,
    457                                          FloatRegister dest) {
    458  vpRiprOpSimd128(v, lhs, dest,
    459                  &X86Encoding::BaseAssemblerX64::vpmaddubsw_ripr);
    460 }
    461 
    462 void MacroAssemblerX64::vpmuludqSimd128(const SimdConstant& v,
    463                                        FloatRegister lhs, FloatRegister dest) {
    464  vpRiprOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX64::vpmuludq_ripr);
    465 }
    466 
    467 void MacroAssemblerX64::bindOffsets(
    468    const MacroAssemblerX86Shared::UsesVector& uses) {
    469  for (JmpSrc src : uses) {
    470    JmpDst dst(currentOffset());
    471    // Using linkJump here is safe, as explained in the comment in
    472    // loadConstantDouble.
    473    masm.linkJump(src, dst);
    474  }
    475 }
    476 
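         // Append the accumulated double, float and SIMD constant pools to the
         // end of the code, aligning each pool, and patch every recorded
         // RIP-relative use to point at its constant.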
    477 void MacroAssemblerX64::finish() {
    478  if (!doubles_.empty()) {
    479    masm.haltingAlign(sizeof(double));
    480  }
    481  for (const Double& d : doubles_) {
    482    bindOffsets(d.uses);
    483    masm.doubleConstant(d.value);
    484  }
    485 
    486  if (!floats_.empty()) {
    487    masm.haltingAlign(sizeof(float));
    488  }
    489  for (const Float& f : floats_) {
    490    bindOffsets(f.uses);
    491    masm.floatConstant(f.value);
    492  }
    493 
    494  // SIMD memory values must be suitably aligned.
    495  if (!simds_.empty()) {
    496    masm.haltingAlign(SimdMemoryAlignment);
    497  }
    498  for (const SimdData& v : simds_) {
    499    bindOffsets(v.uses);
    500    masm.simd128Constant(v.value.bytes());
    501  }
    502 
    503  MacroAssemblerX86Shared::finish();
    504 }
    505 
    506 #ifdef DEBUG
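         // Number of low bits a payload of the given type may occupy; shifting
         // the payload right by this amount must leave zero. Pointer-like
         // payloads may use every bit below the tag (JSVAL_TAG_SHIFT).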
    507 static constexpr int32_t PayloadSize(JSValueType type) {
    508  switch (type) {
    509    case JSVAL_TYPE_UNDEFINED:
    510    case JSVAL_TYPE_NULL:
    511      return 0;
    512    case JSVAL_TYPE_BOOLEAN:
    513      return 1;
    514    case JSVAL_TYPE_INT32:
    515    case JSVAL_TYPE_MAGIC:
    516      return 32;
    517    case JSVAL_TYPE_STRING:
    518    case JSVAL_TYPE_SYMBOL:
    519    case JSVAL_TYPE_PRIVATE_GCTHING:
    520    case JSVAL_TYPE_BIGINT:
    521    case JSVAL_TYPE_OBJECT:
    522      return JSVAL_TAG_SHIFT;
    523    case JSVAL_TYPE_DOUBLE:
    524    case JSVAL_TYPE_UNKNOWN:
    525      break;
    526  }
    527  MOZ_CRASH("bad value type");
    528 }
    529 #endif
    530 
    531 static void AssertValidPayload(MacroAssemblerX64& masm, JSValueType type,
    532                               Register payload, Register scratch) {
    533 #ifdef DEBUG
    534  // All bits above the payload must be zeroed.
    535  Label upperBitsZeroed;
    536  masm.movq(payload, scratch);
    537  masm.shrq(Imm32(PayloadSize(type)), scratch);
     538  masm.testPtr(scratch, scratch);  // sets ZF iff scratch == 0
    539  masm.j(Assembler::Zero, &upperBitsZeroed);
    540  masm.breakpoint();
    541  masm.bind(&upperBitsZeroed);
    542 #endif
    543 }
    544 
    545 void MacroAssemblerX64::tagValue(JSValueType type, Register payload,
    546                                 ValueOperand dest) {
    547  MOZ_ASSERT(type != JSVAL_TYPE_UNDEFINED && type != JSVAL_TYPE_NULL);
    548 
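          // When payload and dest alias, tag in place by OR-ing the shifted
          // tag into the register; otherwise boxNonDouble moves and tags in
          // one step.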
    549  if (payload == dest.valueReg()) {
    550    ScratchRegisterScope scratch(asMasm());
    551    MOZ_ASSERT(dest.valueReg() != scratch);
    552 
    553    AssertValidPayload(*this, type, payload, scratch);
    554 
    555    mov(ImmShiftedTag(type), scratch);
    556    orq(scratch, dest.valueReg());
    557  } else {
    558    boxNonDouble(type, payload, dest);
    559  }
    560 }
    561 
    562 void MacroAssemblerX64::boxValue(JSValueType type, Register src,
    563                                 Register dest) {
    564  MOZ_ASSERT(type != JSVAL_TYPE_UNDEFINED && type != JSVAL_TYPE_NULL);
    565  MOZ_ASSERT(src != dest);
    566 
    567  AssertValidPayload(*this, type, src, dest);
    568 
    569  mov(ImmShiftedTag(type), dest);
    570  orq(src, dest);
    571 }
    572 
    573 void MacroAssemblerX64::boxValue(Register type, Register src, Register dest) {
    574  MOZ_ASSERT(src != dest);
    575 
    576 #ifdef DEBUG
    577  {
    578    ScratchRegisterScope scratch(asMasm());
    579 
    580    movq(src, scratch);
    581 
    582    Label check, isNullOrUndefined, isBoolean, isInt32OrMagic, isPointerSized;
    583 
    584    asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_NULL),
    585                      &isNullOrUndefined);
    586    asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_UNDEFINED),
    587                      &isNullOrUndefined);
    588    asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_BOOLEAN),
    589                      &isBoolean);
    590    asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_INT32),
    591                      &isInt32OrMagic);
    592    asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_MAGIC),
    593                      &isInt32OrMagic);
    594    asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_STRING),
    595                      &isPointerSized);
    596    asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_SYMBOL),
    597                      &isPointerSized);
    598    asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_PRIVATE_GCTHING),
    599                      &isPointerSized);
    600    asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_BIGINT),
    601                      &isPointerSized);
    602    asMasm().branch32(Assembler::Equal, type, Imm32(JSVAL_TYPE_OBJECT),
    603                      &isPointerSized);
    604    breakpoint();
    605    {
    606      bind(&isNullOrUndefined);
    607      shrq(Imm32(PayloadSize(JSVAL_TYPE_NULL)), scratch);
    608      jump(&check);
    609    }
    610    {
    611      bind(&isBoolean);
    612      shrq(Imm32(PayloadSize(JSVAL_TYPE_BOOLEAN)), scratch);
    613      jump(&check);
    614    }
    615    {
    616      bind(&isInt32OrMagic);
    617      shrq(Imm32(PayloadSize(JSVAL_TYPE_INT32)), scratch);
    618      jump(&check);
    619    }
    620    {
    621      bind(&isPointerSized);
    622      shrq(Imm32(PayloadSize(JSVAL_TYPE_STRING)), scratch);
    623      // fall-through
    624    }
    625    bind(&check);
    626 
    627    // All bits above the payload must be zeroed.
    628    Label upperBitsZeroed;
     629    testPtr(scratch, scratch);  // sets ZF iff scratch == 0
    630    j(Assembler::Zero, &upperBitsZeroed);
    631    breakpoint();
    632    bind(&upperBitsZeroed);
    633  }
    634 #endif
    635 
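          // Box the value: fold the type into the full tag
          // (type | JSVAL_TAG_MAX_DOUBLE), shift the tag into place, then OR
          // in the payload.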
    636  if (type != dest) {
    637    movq(type, dest);
    638  }
    639  orq(Imm32(JSVAL_TAG_MAX_DOUBLE), dest);
    640  shlq(Imm32(JSVAL_TAG_SHIFT), dest);
    641  orq(src, dest);
    642 }
    643 
    644 void MacroAssemblerX64::handleFailureWithHandlerTail(
    645    Label* profilerExitTail, Label* bailoutTail,
    646    uint32_t* returnValueCheckOffset) {
    647  // Reserve space for exception information.
    648  subq(Imm32(sizeof(ResumeFromException)), rsp);
    649  movq(rsp, rax);
    650 
    651  // Call the handler.
    652  using Fn = void (*)(ResumeFromException* rfe);
    653  asMasm().setupUnalignedABICall(rcx);
    654  asMasm().passABIArg(rax);
    655  asMasm().callWithABI<Fn, HandleException>(
    656      ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
    657 
    658  *returnValueCheckOffset = asMasm().currentOffset();
    659 
    660  Label entryFrame;
    661  Label catch_;
    662  Label finally;
    663  Label returnBaseline;
    664  Label returnIon;
    665  Label bailout;
    666  Label wasmInterpEntry;
    667  Label wasmCatch;
    668 
    669  load32(Address(rsp, ResumeFromException::offsetOfKind()), rax);
    670  asMasm().branch32(Assembler::Equal, rax,
    671                    Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
    672  asMasm().branch32(Assembler::Equal, rax, Imm32(ExceptionResumeKind::Catch),
    673                    &catch_);
    674  asMasm().branch32(Assembler::Equal, rax, Imm32(ExceptionResumeKind::Finally),
    675                    &finally);
    676  asMasm().branch32(Assembler::Equal, rax,
    677                    Imm32(ExceptionResumeKind::ForcedReturnBaseline),
    678                    &returnBaseline);
    679  asMasm().branch32(Assembler::Equal, rax,
    680                    Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
    681  asMasm().branch32(Assembler::Equal, rax, Imm32(ExceptionResumeKind::Bailout),
    682                    &bailout);
    683  asMasm().branch32(Assembler::Equal, rax,
    684                    Imm32(ExceptionResumeKind::WasmInterpEntry),
    685                    &wasmInterpEntry);
    686  asMasm().branch32(Assembler::Equal, rax,
    687                    Imm32(ExceptionResumeKind::WasmCatch), &wasmCatch);
    688 
    689  breakpoint();  // Invalid kind.
    690 
    691  // No exception handler. Load the error value, restore state and return from
    692  // the entry frame.
    693  bind(&entryFrame);
    694  asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
    695  loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
    696  loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
    697  ret();
    698 
    699  // If we found a catch handler, this must be a baseline frame. Restore state
    700  // and jump to the catch block.
    701  bind(&catch_);
    702  loadPtr(Address(rsp, ResumeFromException::offsetOfTarget()), rax);
    703  loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
    704  loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
    705  jmp(Operand(rax));
    706 
    707  // If we found a finally block, this must be a baseline frame. Push three
    708  // values expected by the finally block: the exception, the exception stack,
    709  // and BooleanValue(true).
    710  bind(&finally);
    711  ValueOperand exception = ValueOperand(rcx);
    712  loadValue(Address(rsp, ResumeFromException::offsetOfException()), exception);
    713 
    714  ValueOperand exceptionStack = ValueOperand(rdx);
    715  loadValue(Address(rsp, ResumeFromException::offsetOfExceptionStack()),
    716            exceptionStack);
    717 
    718  loadPtr(Address(rsp, ResumeFromException::offsetOfTarget()), rax);
    719  loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
    720  loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
    721 
    722  pushValue(exception);
    723  pushValue(exceptionStack);
    724  pushValue(BooleanValue(true));
    725  jmp(Operand(rax));
    726 
    727  // Return BaselineFrame->returnValue() to the caller.
    728  // Used in debug mode and for GeneratorReturn.
    729  Label profilingInstrumentation;
    730  bind(&returnBaseline);
    731  loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
    732  loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
    733  loadValue(Address(rbp, BaselineFrame::reverseOffsetOfReturnValue()),
    734            JSReturnOperand);
    735  jmp(&profilingInstrumentation);
    736 
    737  // Return the given value to the caller.
    738  bind(&returnIon);
    739  loadValue(Address(rsp, ResumeFromException::offsetOfException()),
    740            JSReturnOperand);
    741  loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
    742  loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
    743 
    744  // If profiling is enabled, then update the lastProfilingFrame to refer to
    745  // caller frame before returning. This code is shared by ForcedReturnIon
    746  // and ForcedReturnBaseline.
    747  bind(&profilingInstrumentation);
    748  {
    749    Label skipProfilingInstrumentation;
    750    AbsoluteAddress addressOfEnabled(
    751        asMasm().runtime()->geckoProfiler().addressOfEnabled());
    752    asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
    753                      &skipProfilingInstrumentation);
    754    jump(profilerExitTail);
    755    bind(&skipProfilingInstrumentation);
    756  }
    757 
    758  movq(rbp, rsp);
    759  pop(rbp);
    760  ret();
    761 
    762  // If we are bailing out to baseline to handle an exception, jump to the
    763  // bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
    764  bind(&bailout);
    765  loadPtr(Address(rsp, ResumeFromException::offsetOfBailoutInfo()), r9);
    766  loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
    767  move32(Imm32(1), ReturnReg);
    768  jump(bailoutTail);
    769 
    770  // Reset SP and FP; SP is pointing to the unwound return address to the wasm
    771  // interpreter entry, so we can just ret().
    772  bind(&wasmInterpEntry);
    773  loadPtr(Address(rsp, ResumeFromException::offsetOfFramePointer()), rbp);
    774  loadPtr(Address(rsp, ResumeFromException::offsetOfStackPointer()), rsp);
    775  movePtr(ImmPtr((const void*)wasm::InterpFailInstanceReg), InstanceReg);
    776  masm.ret();
    777 
    778  // Found a wasm catch handler, restore state and jump to it.
    779  bind(&wasmCatch);
    780  wasm::GenerateJumpToCatchHandler(asMasm(), rsp, rax, rbx);
    781 }
    782 
    783 void MacroAssemblerX64::profilerEnterFrame(Register framePtr,
    784                                           Register scratch) {
    785  asMasm().loadJSContext(scratch);
    786  loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
    787  storePtr(framePtr,
    788           Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
    789  storePtr(ImmPtr(nullptr),
    790           Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
    791 }
    792 
    793 void MacroAssemblerX64::profilerExitFrame() {
    794  jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
    795 }
    796 
    797 Assembler::Condition MacroAssemblerX64::testStringTruthy(
    798    bool truthy, const ValueOperand& value) {
    799  ScratchRegisterScope scratch(asMasm());
    800  unboxString(value, scratch);
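          // A string is truthy iff its length is non-zero.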
    801  cmp32(Operand(scratch, JSString::offsetOfLength()), Imm32(0));
    802  return truthy ? Assembler::NotEqual : Assembler::Equal;
    803 }
    804 
    805 Assembler::Condition MacroAssemblerX64::testBigIntTruthy(
    806    bool truthy, const ValueOperand& value) {
    807  ScratchRegisterScope scratch(asMasm());
    808  unboxBigInt(value, scratch);
    809  cmp32(Operand(scratch, JS::BigInt::offsetOfDigitLength()), Imm32(0));
    810  return truthy ? Assembler::NotEqual : Assembler::Equal;
    811 }
    812 
    813 MacroAssembler& MacroAssemblerX64::asMasm() {
    814  return *static_cast<MacroAssembler*>(this);
    815 }
    816 
    817 const MacroAssembler& MacroAssemblerX64::asMasm() const {
    818  return *static_cast<const MacroAssembler*>(this);
    819 }
    820 
    821 void MacroAssembler::subFromStackPtr(Imm32 imm32) {
    822  if (imm32.value) {
     823    // On Windows, we cannot skip very far down the stack without touching the
     824    // memory pages in-between.  This is corner-case code for situations where
    825    // the Ion frame data for a piece of code is very large.  To handle this
    826    // special case, for frames over 4k in size we allocate memory on the stack
    827    // incrementally, touching it as we go.
    828    //
    829    // When the amount is quite large, which it can be, we emit an actual loop,
    830    // in order to keep the function prologue compact.  Compactness is a
     831    // requirement for e.g. Wasm's CodeRange data structure, which can encode only
    832    // 8-bit offsets.
    833    uint32_t amountLeft = imm32.value;
    834    uint32_t fullPages = amountLeft / 4096;
    835    if (fullPages <= 8) {
    836      while (amountLeft > 4096) {
    837        subq(Imm32(4096), StackPointer);
    838        store32(Imm32(0), Address(StackPointer, 0));
    839        amountLeft -= 4096;
    840      }
    841      subq(Imm32(amountLeft), StackPointer);
    842    } else {
    843      ScratchRegisterScope scratch(*this);
    844      Label top;
    845      move32(Imm32(fullPages), scratch);
    846      bind(&top);
    847      subq(Imm32(4096), StackPointer);
    848      store32(Imm32(0), Address(StackPointer, 0));
    849      subl(Imm32(1), scratch);
    850      j(Assembler::NonZero, &top);
    851      amountLeft -= fullPages * 4096;
    852      if (amountLeft) {
    853        subq(Imm32(amountLeft), StackPointer);
    854      }
    855    }
    856  }
    857 }
    858 
    859 void MacroAssemblerX64::convertDoubleToPtr(FloatRegister src, Register dest,
    860                                           Label* fail,
    861                                           bool negativeZeroCheck) {
    862  // Check for -0.0
    863  if (negativeZeroCheck) {
    864    branchNegativeZero(src, dest, fail);
    865  }
    866 
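          // Truncate to int64, convert back, and compare: Parity (unordered,
          // i.e. NaN) or NotEqual (inexact or out-of-range input) means the
          // double is not exactly representable as a pointer-sized integer.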
    867  ScratchDoubleScope scratch(asMasm());
    868  vcvttsd2sq(src, dest);
    869  asMasm().convertInt64ToDouble(Register64(dest), scratch);
    870  vucomisd(scratch, src);
    871  j(Assembler::Parity, fail);
    872  j(Assembler::NotEqual, fail);
    873 }
    874 
    875 // This operation really consists of five phases, in order to enforce the
    876 // restriction that on x64, the dividend must be rax and both rax and rdx will
    877 // be clobbered.
    878 //
    879 //     Input: { lhs, rhs }
    880 //
    881 //  [PUSH] Preserve registers
    882 //  [MOVE] Generate moves to specific registers
    883 //
    884 //  [DIV] Input: { regForRhs, RAX }
    885 //  [DIV] extend RAX into RDX
    886 //  [DIV] x64 Division operator
    887 //  [DIV] Output: { RAX, RDX }
    888 //
    889 //  [MOVE] Move specific registers to outputs
    890 //  [POP] Restore registers
    891 //
    892 //    Output: { output }
    893 void MacroAssemblerX64::flexibleDivMod64(Register lhs, Register rhs,
    894                                         Register output, bool isUnsigned,
    895                                         bool isDiv) {
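          // Identical operands have a statically known result: x / x is 1 and
          // x % x is 0, so the division can be skipped entirely.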
    896  if (lhs == rhs) {
    897    movq(ImmWord(isDiv ? 1 : 0), output);
    898    return;
    899  }
    900 
    901  // Choose a register that is neither rdx nor rax to hold the rhs;
    902  // rbx is chosen arbitrarily, and will be preserved if necessary.
    903  Register regForRhs = (rhs == rax || rhs == rdx) ? rbx : rhs;
    904 
    905  // Add registers we will be clobbering as live, but also remove the set we
    906  // do not restore.
    907  LiveGeneralRegisterSet preserve;
    908  preserve.add(rdx);
    909  preserve.add(rax);
    910  if (rhs != regForRhs) {
    911    preserve.add(regForRhs);
    912  }
    913 
    914  preserve.takeUnchecked(output);
    915 
    916  asMasm().PushRegsInMask(preserve);
    917 
    918  // Shuffle input into place.
    919  asMasm().moveRegPair(lhs, rhs, rax, regForRhs);
    920  if (oom()) {
    921    return;
    922  }
    923 
     924  // Extend rax into rdx (zero- for unsigned, sign- for signed) to form
          // the 128-bit dividend rdx:rax expected by udiv/idiv.
    925  if (isUnsigned) {
    926    movq(ImmWord(0), rdx);
    927    udivq(regForRhs);
    928  } else {
    929    cqo();
    930    idivq(regForRhs);
    931  }
    932 
    933  Register result = isDiv ? rax : rdx;
    934  if (result != output) {
    935    movq(result, output);
    936  }
    937 
    938  asMasm().PopRegsInMask(preserve);
    939 }
    940 
    941 //{{{ check_macroassembler_style
    942 // ===============================================================
    943 // ABI function calls.
    944 
    945 void MacroAssembler::setupUnalignedABICall(Register scratch) {
    946  setupNativeABICall();
    947  dynamicAlignment_ = true;
    948 
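          // Align rsp down to the ABI alignment and push the old rsp so
          // callWithABIPost can restore it with pop(rsp).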
    949  movq(rsp, scratch);
    950  andq(Imm32(~(ABIStackAlignment - 1)), rsp);
    951  push(scratch);
    952 }
    953 
    954 void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
    955  MOZ_ASSERT(inCall_);
    956  uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
    957 
    958  if (dynamicAlignment_) {
    959    // sizeof(intptr_t) accounts for the saved stack pointer pushed by
    960    // setupUnalignedABICall.
    961    stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
    962                                         ABIStackAlignment);
    963  } else {
    964    uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
    965    stackForCall += ComputeByteAlignment(
    966        stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
    967  }
    968 
    969  *stackAdjust = stackForCall;
    970  reserveStack(stackForCall);
    971 
    972  // Position all arguments.
    973  {
    974    enoughMemory_ &= moveResolver_.resolve();
    975    if (!enoughMemory_) {
    976      return;
    977    }
    978 
    979    MoveEmitter emitter(*this);
    980    emitter.emit(moveResolver_);
    981    emitter.finish();
    982  }
    983 
    984  assertStackAlignment(ABIStackAlignment);
    985 }
    986 
    987 void MacroAssembler::callWithABIPost(uint32_t stackAdjust, ABIType result) {
    988  freeStack(stackAdjust);
    989  if (dynamicAlignment_) {
    990    pop(rsp);
    991  }
    992 
    993 #ifdef DEBUG
    994  MOZ_ASSERT(inCall_);
    995  inCall_ = false;
    996 #endif
    997 }
    998 
    999 static bool IsIntArgReg(Register reg) {
   1000  for (uint32_t i = 0; i < NumIntArgRegs; i++) {
   1001    if (IntArgRegs[i] == reg) {
   1002      return true;
   1003    }
   1004  }
   1005 
   1006  return false;
   1007 }
   1008 
   1009 void MacroAssembler::callWithABINoProfiler(Register fun, ABIType result) {
   1010  if (IsIntArgReg(fun)) {
   1011    // Callee register may be clobbered for an argument. Move the callee to
   1012    // r10, a volatile, non-argument register.
   1013    propagateOOM(moveResolver_.addMove(MoveOperand(fun), MoveOperand(r10),
   1014                                       MoveOp::GENERAL));
   1015    fun = r10;
   1016  }
   1017 
   1018  MOZ_ASSERT(!IsIntArgReg(fun));
   1019 
   1020  uint32_t stackAdjust;
   1021  callWithABIPre(&stackAdjust);
   1022  call(fun);
   1023  callWithABIPost(stackAdjust, result);
   1024 }
   1025 
   1026 void MacroAssembler::callWithABINoProfiler(const Address& fun, ABIType result) {
   1027  Address safeFun = fun;
   1028  if (IsIntArgReg(safeFun.base)) {
   1029    // Callee register may be clobbered for an argument. Move the callee to
   1030    // r10, a volatile, non-argument register.
   1031    propagateOOM(moveResolver_.addMove(MoveOperand(fun.base), MoveOperand(r10),
   1032                                       MoveOp::GENERAL));
   1033    safeFun.base = r10;
   1034  }
   1035 
   1036  MOZ_ASSERT(!IsIntArgReg(safeFun.base));
   1037 
   1038  uint32_t stackAdjust;
   1039  callWithABIPre(&stackAdjust);
   1040  call(safeFun);
   1041  callWithABIPost(stackAdjust, result);
   1042 }
   1043 
   1044 // ===============================================================
   1045 // Move instructions
   1046 
   1047 void MacroAssembler::moveValue(const ValueOperand& src,
   1048                               const ValueOperand& dest) {
   1049  if (src == dest) {
   1050    return;
   1051  }
   1052  movq(src.valueReg(), dest.valueReg());
   1053 }
   1054 
   1055 void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
   1056  if (!src.isGCThing()) {
   1057    movePtr(ImmWord(src.asRawBits()), dest.valueReg());
   1058    return;
   1059  }
   1060 
   1061  movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
   1062  writeDataRelocation(src);
   1063 }
   1064 
   1065 // ===============================================================
   1066 // Arithmetic functions
   1067 
   1068 void MacroAssembler::flexibleQuotientPtr(
   1069    Register lhs, Register rhs, Register dest, bool isUnsigned,
   1070    const LiveRegisterSet& volatileLiveRegs) {
   1071  flexibleDivMod64(lhs, rhs, dest, isUnsigned, /* isDiv= */ true);
   1072 }
   1073 
   1074 void MacroAssembler::flexibleRemainderPtr(
   1075    Register lhs, Register rhs, Register dest, bool isUnsigned,
   1076    const LiveRegisterSet& volatileLiveRegs) {
   1077  flexibleDivMod64(lhs, rhs, dest, isUnsigned, /* isDiv= */ false);
   1078 }
   1079 
   1080 // ===============================================================
   1081 // Branch functions
   1082 
   1083 void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
   1084  if (ptr != buffer) {
   1085    movePtr(ptr, buffer);
   1086  }
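          // Clearing the low ChunkMask bits rounds the pointer down to its
          // chunk header; the chunk's store-buffer slot is non-null only for
          // nursery chunks.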
   1087  andPtr(Imm32(int32_t(~gc::ChunkMask)), buffer);
   1088  loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
   1089 }
   1090 
   1091 void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
   1092                                             Register temp, Label* label) {
   1093  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
   1094 
   1095  ScratchRegisterScope scratch(*this);
   1096  MOZ_ASSERT(ptr != temp);
   1097  MOZ_ASSERT(ptr != scratch);
   1098 
   1099  movePtr(ptr, scratch);
   1100  andPtr(Imm32(int32_t(~gc::ChunkMask)), scratch);
   1101  branchPtr(InvertCondition(cond), Address(scratch, gc::ChunkStoreBufferOffset),
   1102            ImmWord(0), label);
   1103 }
   1104 
   1105 template <typename T>
   1106 void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
   1107                                                  const T& value, Register temp,
   1108                                                  Label* label) {
   1109  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
   1110  MOZ_ASSERT(temp != InvalidReg);
   1111 
   1112  Label done;
   1113  branchTestGCThing(Assembler::NotEqual, value,
   1114                    cond == Assembler::Equal ? &done : label);
   1115 
   1116  getGCThingValueChunk(value, temp);
   1117  branchPtr(InvertCondition(cond), Address(temp, gc::ChunkStoreBufferOffset),
   1118            ImmWord(0), label);
   1119 
   1120  bind(&done);
   1121 }
   1122 
   1123 void MacroAssembler::branchValueIsNurseryCell(Condition cond,
   1124                                              const Address& address,
   1125                                              Register temp, Label* label) {
   1126  branchValueIsNurseryCellImpl(cond, address, temp, label);
   1127 }
   1128 
   1129 void MacroAssembler::branchValueIsNurseryCell(Condition cond,
   1130                                              ValueOperand value, Register temp,
   1131                                              Label* label) {
   1132  branchValueIsNurseryCellImpl(cond, value, temp, label);
   1133 }
   1134 
   1135 void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
   1136                                     const Value& rhs, Label* label) {
   1137  MOZ_ASSERT(cond == Equal || cond == NotEqual);
   1138  MOZ_ASSERT(!rhs.isNaN());
   1139 
   1140  if (!rhs.isGCThing()) {
   1141    cmpPtr(lhs.valueReg(), ImmWord(rhs.asRawBits()));
   1142  } else {
   1143    ScratchRegisterScope scratch(*this);
   1144    MOZ_ASSERT(lhs.valueReg() != scratch);
   1145    moveValue(rhs, ValueOperand(scratch));
   1146    cmpPtr(lhs.valueReg(), scratch);
   1147  }
   1148  j(cond, label);
   1149 }
   1150 
   1151 void MacroAssembler::branchTestNaNValue(Condition cond, const ValueOperand& val,
   1152                                        Register temp, Label* label) {
   1153  MOZ_ASSERT(cond == Equal || cond == NotEqual);
   1154  ScratchRegisterScope scratch(*this);
   1155 
   1156  // When testing for NaN, we want to ignore the sign bit.
   1157  movq(ImmWord(~mozilla::FloatingPoint<double>::kSignBit), scratch);
   1158  andq(val.valueReg(), scratch);
   1159 
   1160  // Compare against a NaN with sign bit 0.
   1161  static_assert(JS::detail::CanonicalizedNaNSignBit == 0);
   1162  moveValue(DoubleValue(JS::GenericNaN()), ValueOperand(temp));
   1163  cmpPtr(scratch, temp);
   1164  j(cond, label);
   1165 }
   1166 
   1167 // ========================================================================
   1168 // Memory access primitives.
   1169 template <typename T>
   1170 void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
   1171                                       MIRType valueType, const T& dest) {
   1172  MOZ_ASSERT(valueType < MIRType::Value);
   1173 
   1174  if (valueType == MIRType::Double) {
   1175    boxDouble(value.reg().typedReg().fpu(), dest);
   1176    return;
   1177  }
   1178 
   1179  if (value.constant()) {
   1180    storeValue(value.value(), dest);
   1181  } else {
   1182    storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
   1183               dest);
   1184  }
   1185 }
   1186 
   1187 template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
   1188                                                MIRType valueType,
   1189                                                const Address& dest);
   1190 template void MacroAssembler::storeUnboxedValue(
   1191    const ConstantOrRegister& value, MIRType valueType,
   1192    const BaseObjectElementIndex& dest);
   1193 
   1194 void MacroAssembler::PushBoxed(FloatRegister reg) {
   1195  subq(Imm32(sizeof(double)), StackPointer);
   1196  boxDouble(reg, Address(StackPointer, 0));
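          // Record the manual rsp adjustment so framePushed() stays in sync.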
   1197  adjustFrame(sizeof(double));
   1198 }
   1199 
   1200 // ========================================================================
   1201 // wasm support
   1202 
   1203 void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
   1204                              Operand srcAddr, AnyRegister out) {
   1205  // NOTE: the generated code must match the assembly code in gen_load in
   1206  // GenerateAtomicOperations.py
   1207  memoryBarrierBefore(access.sync());
   1208 
   1209  MOZ_ASSERT_IF(
   1210      access.isZeroExtendSimd128Load(),
   1211      access.type() == Scalar::Float32 || access.type() == Scalar::Float64);
   1212  MOZ_ASSERT_IF(
   1213      access.isSplatSimd128Load(),
   1214      access.type() == Scalar::Uint8 || access.type() == Scalar::Uint16 ||
   1215          access.type() == Scalar::Float32 || access.type() == Scalar::Float64);
   1216  MOZ_ASSERT_IF(access.isWidenSimd128Load(), access.type() == Scalar::Float64);
   1217 
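          // Splat loads broadcast the loaded scalar to every lane, widen loads
          // sign- or zero-extend eight/four/two narrow lanes, and zero-extend
          // loads clear the upper lanes (which vmovss/vmovsd already do).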
   1218  switch (access.type()) {
   1219    case Scalar::Int8:
   1220      append(access, wasm::TrapMachineInsn::Load8,
   1221             FaultingCodeOffset(currentOffset()));
   1222      movsbl(srcAddr, out.gpr());
   1223      break;
   1224    case Scalar::Uint8:
   1225      append(access, wasm::TrapMachineInsn::Load8,
   1226             FaultingCodeOffset(currentOffset()));
   1227      if (access.isSplatSimd128Load()) {
   1228        vbroadcastb(srcAddr, out.fpu());
   1229      } else {
   1230        movzbl(srcAddr, out.gpr());
   1231      }
   1232      break;
   1233    case Scalar::Int16:
   1234      append(access, wasm::TrapMachineInsn::Load16,
   1235             FaultingCodeOffset(currentOffset()));
   1236      movswl(srcAddr, out.gpr());
   1237      break;
   1238    case Scalar::Uint16:
   1239      append(access, wasm::TrapMachineInsn::Load16,
   1240             FaultingCodeOffset(currentOffset()));
   1241      if (access.isSplatSimd128Load()) {
   1242        vbroadcastw(srcAddr, out.fpu());
   1243      } else {
   1244        movzwl(srcAddr, out.gpr());
   1245      }
   1246      break;
   1247    case Scalar::Int32:
   1248    case Scalar::Uint32:
   1249      append(access, wasm::TrapMachineInsn::Load32,
   1250             FaultingCodeOffset(currentOffset()));
   1251      movl(srcAddr, out.gpr());
   1252      break;
   1253    case Scalar::Float32:
   1254      append(access, wasm::TrapMachineInsn::Load32,
   1255             FaultingCodeOffset(currentOffset()));
   1256      if (access.isSplatSimd128Load()) {
   1257        vbroadcastss(srcAddr, out.fpu());
   1258      } else {
   1259        // vmovss does the right thing also for access.isZeroExtendSimd128Load()
   1260        vmovss(srcAddr, out.fpu());
   1261      }
   1262      break;
   1263    case Scalar::Float64:
   1264      append(access, wasm::TrapMachineInsn::Load64,
   1265             FaultingCodeOffset(currentOffset()));
   1266      if (access.isSplatSimd128Load()) {
   1267        vmovddup(srcAddr, out.fpu());
   1268      } else if (access.isWidenSimd128Load()) {
   1269        switch (access.widenSimdOp()) {
   1270          case wasm::SimdOp::V128Load8x8S:
   1271            vpmovsxbw(srcAddr, out.fpu());
   1272            break;
   1273          case wasm::SimdOp::V128Load8x8U:
   1274            vpmovzxbw(srcAddr, out.fpu());
   1275            break;
   1276          case wasm::SimdOp::V128Load16x4S:
   1277            vpmovsxwd(srcAddr, out.fpu());
   1278            break;
   1279          case wasm::SimdOp::V128Load16x4U:
   1280            vpmovzxwd(srcAddr, out.fpu());
   1281            break;
   1282          case wasm::SimdOp::V128Load32x2S:
   1283            vpmovsxdq(srcAddr, out.fpu());
   1284            break;
   1285          case wasm::SimdOp::V128Load32x2U:
   1286            vpmovzxdq(srcAddr, out.fpu());
   1287            break;
   1288          default:
   1289            MOZ_CRASH("Unexpected widening op for wasmLoad");
   1290        }
   1291      } else {
   1292        // vmovsd does the right thing also for access.isZeroExtendSimd128Load()
   1293        vmovsd(srcAddr, out.fpu());
   1294      }
   1295      break;
   1296    case Scalar::Simd128: {
   1297      FaultingCodeOffset fco =
   1298          MacroAssemblerX64::loadUnalignedSimd128(srcAddr, out.fpu());
   1299      append(access, wasm::TrapMachineInsn::Load128, fco);
   1300      break;
   1301    }
   1302    case Scalar::Int64:
   1303      MOZ_CRASH("int64 loads must use load64");
   1304    case Scalar::Float16:
   1305    case Scalar::BigInt64:
   1306    case Scalar::BigUint64:
   1307    case Scalar::Uint8Clamped:
   1308    case Scalar::MaxTypedArrayViewType:
   1309      MOZ_CRASH("unexpected scalar type for wasmLoad");
   1310  }
   1311 
   1312  memoryBarrierAfter(access.sync());
   1313 }
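
        // Worked example of the widening loads above (a sketch, not emitted
        // code): wasm's V128Load8x8S maps to vpmovsxbw, which reads 8 bytes
        // and sign-extends each one to 16 bits, so input bytes
        // {0x80, 0x01, ...} become lanes {0xff80, 0x0001, ...}; the unsigned
        // forms (vpmovzx*) zero-extend instead.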
   1314 
   1315 void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
   1316                                 Operand srcAddr, Register64 out) {
   1317  // NOTE: the generated code must match the assembly code in gen_load in
   1318  // GenerateAtomicOperations.py
   1319  memoryBarrierBefore(access.sync());
   1320 
   1321  switch (access.type()) {
   1322    case Scalar::Int8:
   1323      append(access, wasm::TrapMachineInsn::Load8,
   1324             FaultingCodeOffset(currentOffset()));
   1325      movsbq(srcAddr, out.reg);
   1326      break;
   1327    case Scalar::Uint8:
   1328      append(access, wasm::TrapMachineInsn::Load8,
   1329             FaultingCodeOffset(currentOffset()));
   1330      movzbq(srcAddr, out.reg);
   1331      break;
   1332    case Scalar::Int16:
   1333      append(access, wasm::TrapMachineInsn::Load16,
   1334             FaultingCodeOffset(currentOffset()));
   1335      movswq(srcAddr, out.reg);
   1336      break;
   1337    case Scalar::Uint16:
   1338      append(access, wasm::TrapMachineInsn::Load16,
   1339             FaultingCodeOffset(currentOffset()));
   1340      movzwq(srcAddr, out.reg);
   1341      break;
   1342    case Scalar::Int32:
   1343      append(access, wasm::TrapMachineInsn::Load32,
   1344             FaultingCodeOffset(currentOffset()));
   1345      movslq(srcAddr, out.reg);
   1346      break;
   1347    // A 32-bit move zero-extends to 64 bits, so Uint32 needs only movl.
   1348    case Scalar::Uint32:
   1349      append(access, wasm::TrapMachineInsn::Load32,
   1350             FaultingCodeOffset(currentOffset()));
   1351      movl(srcAddr, out.reg);
   1352      break;
   1353    case Scalar::Int64:
   1354      append(access, wasm::TrapMachineInsn::Load64,
   1355             FaultingCodeOffset(currentOffset()));
   1356      movq(srcAddr, out.reg);
   1357      break;
   1358    case Scalar::Float16:
   1359    case Scalar::Float32:
   1360    case Scalar::Float64:
   1361    case Scalar::Simd128:
   1362      MOZ_CRASH("float loads must use wasmLoad");
   1363    case Scalar::Uint8Clamped:
   1364    case Scalar::BigInt64:
   1365    case Scalar::BigUint64:
   1366    case Scalar::MaxTypedArrayViewType:
   1367      MOZ_CRASH("unexpected scalar type for wasmLoadI64");
   1368  }
   1369 
   1370  memoryBarrierAfter(access.sync());
   1371 }
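
        // Illustrative x64 detail assumed by the Uint32 case above: a 32-bit
        // mov such as `movl (%rdi), %eax` clears bits 63:32 of %rax, so no
        // explicit zero-extend is needed, whereas Int32 must use movslq.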
   1372 
   1373 void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
   1374                               AnyRegister value, Operand dstAddr) {
   1375  // NOTE: the generated code must match the assembly code in gen_store in
   1376  // GenerateAtomicOperations.py
   1377  memoryBarrierBefore(access.sync());
   1378 
   1379  switch (access.type()) {
   1380    case Scalar::Int8:
   1381    case Scalar::Uint8:
   1382      append(access, wasm::TrapMachineInsn::Store8,
   1383             FaultingCodeOffset(currentOffset()));
   1384      movb(value.gpr(), dstAddr);
   1385      break;
   1386    case Scalar::Int16:
   1387    case Scalar::Uint16:
   1388      append(access, wasm::TrapMachineInsn::Store16,
   1389             FaultingCodeOffset(currentOffset()));
   1390      movw(value.gpr(), dstAddr);
   1391      break;
   1392    case Scalar::Int32:
   1393    case Scalar::Uint32:
   1394      append(access, wasm::TrapMachineInsn::Store32,
   1395             FaultingCodeOffset(currentOffset()));
   1396      movl(value.gpr(), dstAddr);
   1397      break;
   1398    case Scalar::Int64:
   1399      append(access, wasm::TrapMachineInsn::Store64,
   1400             FaultingCodeOffset(currentOffset()));
   1401      movq(value.gpr(), dstAddr);
   1402      break;
   1403    case Scalar::Float32: {
   1404      FaultingCodeOffset fco = storeFloat32(value.fpu(), dstAddr);
   1405      append(access, wasm::TrapMachineInsn::Store32, fco);
   1406      break;
   1407    }
   1408    case Scalar::Float64: {
   1409      FaultingCodeOffset fco = storeDouble(value.fpu(), dstAddr);
   1410      append(access, wasm::TrapMachineInsn::Store64, fco);
   1411      break;
   1412    }
   1413    case Scalar::Simd128: {
   1414      FaultingCodeOffset fco =
   1415          MacroAssemblerX64::storeUnalignedSimd128(value.fpu(), dstAddr);
   1416      append(access, wasm::TrapMachineInsn::Store128, fco);
   1417      break;
   1418    }
   1419    case Scalar::Uint8Clamped:
   1420    case Scalar::BigInt64:
   1421    case Scalar::BigUint64:
   1422    case Scalar::Float16:
   1423    case Scalar::MaxTypedArrayViewType:
   1424      MOZ_CRASH("unexpected array type");
   1425  }
   1426 
   1427  memoryBarrierAfter(access.sync());
   1428 }
   1429 
   1430 void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
   1431                                                Register output,
   1432                                                bool isSaturating,
   1433                                                Label* oolEntry) {
   1434  vcvttsd2sq(input, output);
   1435 
   1436  // Check that the result is in the uint32_t range.
   1437  ScratchRegisterScope scratch(*this);
   1438  move32(Imm32(0xffffffff), scratch);
   1439  cmpq(scratch, output);
   1440  j(Assembler::Above, oolEntry);
   1441 }
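
        // A minimal C sketch of the range check above (same for the float32
        // variant below); the helper name is illustrative, not a real API:
        //
        //   uint64_t r = (uint64_t)truncate_via_vcvttsd2sq(d);
        //   if (r > 0xffffffffu)   // negative, NaN and >= 2^32 all land here,
        //     goto ool;            // since invalid inputs convert to 2^63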
   1442 
   1443 void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
   1444                                                 Register output,
   1445                                                 bool isSaturating,
   1446                                                 Label* oolEntry) {
   1447  vcvttss2sq(input, output);
   1448 
   1449  // Check that the result is in the uint32_t range.
   1450  ScratchRegisterScope scratch(*this);
   1451  move32(Imm32(0xffffffff), scratch);
   1452  cmpq(scratch, output);
   1453  j(Assembler::Above, oolEntry);
   1454 }
   1455 
   1456 void MacroAssembler::wasmTruncateDoubleToInt64(
   1457    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
   1458    Label* oolRejoin, FloatRegister tempReg) {
   1459  vcvttsd2sq(input, output.reg);
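         // vcvttsd2sq yields INT64_MIN (0x8000000000000000) for NaN and any
         // out-of-range input, and `output - 1` sets OF only when output is
         // INT64_MIN, so a single cmpq/jo catches every failure case.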
   1460  cmpq(Imm32(1), output.reg);
   1461  j(Assembler::Overflow, oolEntry);
   1462  bind(oolRejoin);
   1463 }
   1464 
   1465 void MacroAssembler::wasmTruncateFloat32ToInt64(
   1466    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
   1467    Label* oolRejoin, FloatRegister tempReg) {
   1468  vcvttss2sq(input, output.reg);
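         // Same INT64_MIN sentinel check as in wasmTruncateDoubleToInt64.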
   1469  cmpq(Imm32(1), output.reg);
   1470  j(Assembler::Overflow, oolEntry);
   1471  bind(oolRejoin);
   1472 }
   1473 
   1474 void MacroAssembler::wasmTruncateDoubleToUInt64(
   1475    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
   1476    Label* oolRejoin, FloatRegister tempReg) {
   1477  // If the input is smaller than 2^63, vcvttsd2sq will do the right
   1478  // thing, so we use it directly. Else, we subtract 2^63, convert to
   1479  // int64, and then set the sign bit (i.e. add 2^63) in the result.
   1480 
   1481  Label isLarge;
   1482 
   1483  ScratchDoubleScope scratch(*this);
   1484  loadConstantDouble(double(0x8000000000000000), scratch);
   1485  branchDouble(Assembler::DoubleGreaterThanOrEqual, input, scratch, &isLarge);
   1486  vcvttsd2sq(input, output.reg);
   1487  testq(output.reg, output.reg);
   1488  j(Assembler::Signed, oolEntry);
   1489  jump(oolRejoin);
   1490 
   1491  bind(&isLarge);
   1492 
   1493  moveDouble(input, tempReg);
   1494  vsubsd(scratch, tempReg, tempReg);
   1495  vcvttsd2sq(tempReg, output.reg);
   1496  testq(output.reg, output.reg);
   1497  j(Assembler::Signed, oolEntry);
   1498  or64(Imm64(0x8000000000000000), output);
   1499 
   1500  bind(oolRejoin);
   1501 }
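
        // A minimal C sketch of the conversion above (the float32 variant
        // below is identical in structure); NaN reaches the out-of-line
        // entry on either path:
        //
        //   if (d < 0x1p63) {                   // small inputs
        //     int64_t r = (int64_t)d;           // vcvttsd2sq
        //     if (r < 0) goto ool;              // negative input
        //     return (uint64_t)r;
        //   }
        //   int64_t biased = (int64_t)(d - 0x1p63);  // large: bias down by 2^63
        //   if (biased < 0) goto ool;                // still out of range
        //   return (uint64_t)biased | (1ull << 63);  // and restore bit 63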
   1502 
   1503 void MacroAssembler::wasmTruncateFloat32ToUInt64(
   1504    FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
   1505    Label* oolRejoin, FloatRegister tempReg) {
   1506  // If the input is smaller than 2^63, vcvttss2sq will do the right
   1507  // thing, so we use it directly. Else, we subtract 2^63, convert to
   1508  // int64, and then set the sign bit (i.e. add 2^63) in the result.
   1509 
   1510  Label isLarge;
   1511 
   1512  ScratchFloat32Scope scratch(*this);
   1513  loadConstantFloat32(float(0x8000000000000000), scratch);
   1514  branchFloat(Assembler::DoubleGreaterThanOrEqual, input, scratch, &isLarge);
   1515  vcvttss2sq(input, output.reg);
   1516  testq(output.reg, output.reg);
   1517  j(Assembler::Signed, oolEntry);
   1518  jump(oolRejoin);
   1519 
   1520  bind(&isLarge);
   1521 
   1522  moveFloat32(input, tempReg);
   1523  vsubss(scratch, tempReg, tempReg);
   1524  vcvttss2sq(tempReg, output.reg);
   1525  testq(output.reg, output.reg);
   1526  j(Assembler::Signed, oolEntry);
   1527  or64(Imm64(0x8000000000000000), output);
   1528 
   1529  bind(oolRejoin);
   1530 }
   1531 
   1532 void MacroAssembler::widenInt32(Register r) {
   1533  move32To64ZeroExtend(r, Register64(r));
   1534 }
   1535 
   1536 // ========================================================================
   1537 // Convert floating point.
   1538 
   1539 void MacroAssembler::convertInt64ToDouble(Register64 input,
   1540                                          FloatRegister output) {
   1541  // Zero the output register to break dependencies, see convertInt32ToDouble.
   1542  zeroDouble(output);
   1543 
   1544  vcvtsq2sd(input.reg, output, output);
   1545 }
   1546 
   1547 void MacroAssembler::convertInt64ToFloat32(Register64 input,
   1548                                           FloatRegister output) {
   1549  // Zero the output register to break dependencies, see convertInt32ToDouble.
   1550  zeroFloat32(output);
   1551 
   1552  vcvtsq2ss(input.reg, output, output);
   1553 }
   1554 
   1555 bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return true; }
   1556 
   1557 void MacroAssembler::convertUInt64ToDouble(Register64 input,
   1558                                           FloatRegister output,
   1559                                           Register temp) {
   1560  // Zero the output register to break dependencies, see convertInt32ToDouble.
   1561  zeroDouble(output);
   1562 
   1563  // If the input's sign bit is not set we use vcvtsq2sd directly.
   1564  // Else, we halve the input while keeping its LSB (round-to-odd, to
   1565  // avoid double rounding), convert to double, and double the result.
   1566  Label done;
   1567  Label isSigned;
   1568 
   1569  testq(input.reg, input.reg);
   1570  j(Assembler::Signed, &isSigned);
   1571  vcvtsq2sd(input.reg, output, output);
   1572  jump(&done);
   1573 
   1574  bind(&isSigned);
   1575 
   1576  ScratchRegisterScope scratch(*this);
   1577  mov(input.reg, scratch);
   1578  mov(input.reg, temp);
   1579  shrq(Imm32(1), scratch);
   1580  andl(Imm32(1), temp);
   1581  orq(temp, scratch);
   1582 
   1583  vcvtsq2sd(scratch, output, output);
   1584  vaddsd(output, output, output);
   1585 
   1586  bind(&done);
   1587 }
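
        // A minimal C sketch of the signed-input path above: (x >> 1) | (x & 1)
        // halves x while keeping a sticky low bit (round-to-odd), so the
        // convert-then-double sequence cannot suffer double rounding:
        //
        //   uint64_t half = (x >> 1) | (x & 1);   // shrq / andl / orq
        //   double d = (double)(int64_t)half;     // vcvtsq2sd: half < 2^63
        //   return d + d;                         // vaddsd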
   1588 
   1589 void MacroAssembler::convertUInt64ToFloat32(Register64 input,
   1590                                            FloatRegister output,
   1591                                            Register temp) {
   1592  // Zero the output register to break dependencies, see convertInt32ToDouble.
   1593  zeroFloat32(output);
   1594 
   1595  // See comment in convertUInt64ToDouble.
   1596  Label done;
   1597  Label isSigned;
   1598 
   1599  testq(input.reg, input.reg);
   1600  j(Assembler::Signed, &isSigned);
   1601  vcvtsq2ss(input.reg, output, output);
   1602  jump(&done);
   1603 
   1604  bind(&isSigned);
   1605 
   1606  ScratchRegisterScope scratch(*this);
   1607  mov(input.reg, scratch);
   1608  mov(input.reg, temp);
   1609  shrq(Imm32(1), scratch);
   1610  andl(Imm32(1), temp);
   1611  orq(temp, scratch);
   1612 
   1613  vcvtsq2ss(scratch, output, output);
   1614  vaddss(output, output, output);
   1615 
   1616  bind(&done);
   1617 }
   1618 
   1619 void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
   1620  convertInt64ToDouble(Register64(src), dest);
   1621 }
   1622 
   1623 // ========================================================================
   1624 // Primitive atomic operations.
   1625 
   1626 void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
   1627                                           const Address& mem,
   1628                                           Register64 expected,
   1629                                           Register64 replacement,
   1630                                           Register64 output) {
   1631  MOZ_ASSERT(output.reg == rax);
   1632  if (expected != output) {
   1633    movq(expected.reg, output.reg);
   1634  }
   1635  append(access, wasm::TrapMachineInsn::Atomic,
   1636         FaultingCodeOffset(currentOffset()));
   1637  lock_cmpxchgq(replacement.reg, Operand(mem));
   1638 }
   1639 
   1640 void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
   1641                                           const BaseIndex& mem,
   1642                                           Register64 expected,
   1643                                           Register64 replacement,
   1644                                           Register64 output) {
   1645  MOZ_ASSERT(output.reg == rax);
   1646  if (expected != output) {
   1647    movq(expected.reg, output.reg);
   1648  }
   1649  append(access, wasm::TrapMachineInsn::Atomic,
   1650         FaultingCodeOffset(currentOffset()));
   1651  lock_cmpxchgq(replacement.reg, Operand(mem));
   1652 }
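
        // lock cmpxchgq follows the usual protocol: %rax carries the expected
        // value in and the value actually found in memory out, which is why
        // both overloads pin output to rax and prime it with `expected`.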
   1653 
   1654 void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
   1655                                          const Address& mem, Register64 value,
   1656                                          Register64 output) {
   1657  if (value != output) {
   1658    movq(value.reg, output.reg);
   1659  }
   1660  append(access, wasm::TrapMachineInsn::Atomic,
   1661         FaultingCodeOffset(masm.currentOffset()));
   1662  xchgq(output.reg, Operand(mem));
   1663 }
   1664 
   1665 void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
   1666                                          const BaseIndex& mem,
   1667                                          Register64 value, Register64 output) {
   1668  if (value != output) {
   1669    movq(value.reg, output.reg);
   1670  }
   1671  append(access, wasm::TrapMachineInsn::Atomic,
   1672         FaultingCodeOffset(masm.currentOffset()));
   1673  xchgq(output.reg, Operand(mem));
   1674 }
   1675 
   1676 template <typename T>
   1677 static void AtomicFetchOp64(MacroAssembler& masm,
   1678                            const wasm::MemoryAccessDesc* access, AtomicOp op,
   1679                            Register value, const T& mem, Register temp,
   1680                            Register output) {
   1681  // NOTE: the generated code must match the assembly code in gen_fetchop in
   1682  // GenerateAtomicOperations.py
   1683  if (op == AtomicOp::Add) {
   1684    if (value != output) {
   1685      masm.movq(value, output);
   1686    }
   1687    if (access) {
   1688      masm.append(*access, wasm::TrapMachineInsn::Atomic,
   1689                  FaultingCodeOffset(masm.currentOffset()));
   1690    }
   1691    masm.lock_xaddq(output, Operand(mem));
   1692  } else if (op == AtomicOp::Sub) {
   1693    if (value != output) {
   1694      masm.movq(value, output);
   1695    }
   1696    masm.negq(output);
   1697    if (access) {
   1698      masm.append(*access, wasm::TrapMachineInsn::Atomic,
   1699                  FaultingCodeOffset(masm.currentOffset()));
   1700    }
   1701    masm.lock_xaddq(output, Operand(mem));
   1702  } else {
   1703    Label again;
   1704    MOZ_ASSERT(output == rax);
   1705    MOZ_ASSERT(value != output);
   1706    MOZ_ASSERT(value != temp);
   1707    MOZ_ASSERT(temp != output);
   1708    if (access) {
   1709      masm.append(*access, wasm::TrapMachineInsn::Load64,
   1710                  FaultingCodeOffset(masm.currentOffset()));
   1711    }
   1712    masm.movq(Operand(mem), rax);
   1713    masm.bind(&again);
   1714    masm.movq(rax, temp);
   1715    switch (op) {
   1716      case AtomicOp::And:
   1717        masm.andq(value, temp);
   1718        break;
   1719      case AtomicOp::Or:
   1720        masm.orq(value, temp);
   1721        break;
   1722      case AtomicOp::Xor:
   1723        masm.xorq(value, temp);
   1724        break;
   1725      default:
   1726        MOZ_CRASH();
   1727    }
   1728    masm.lock_cmpxchgq(temp, Operand(mem));
   1729    masm.j(MacroAssembler::NonZero, &again);
   1730  }
   1731 }
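
        // In C11-atomics terms, the and/or/xor path above is roughly the
        // standard compare-exchange retry loop (a sketch, with `op` standing
        // for the chosen bitwise operator):
        //
        //   uint64_t old = atomic_load(mem);       // movq (mem), %rax
        //   uint64_t want;
        //   do {
        //     want = old op value;                 // andq/orq/xorq into temp
        //   } while (!atomic_compare_exchange_weak(mem, &old, want));
        //   return old;                            // rax keeps the old value
        //
        // Add and Sub instead use lock xaddq, negating the operand for Sub.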
   1732 
   1733 void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
   1734                                         AtomicOp op, Register64 value,
   1735                                         const Address& mem, Register64 temp,
   1736                                         Register64 output) {
   1737  AtomicFetchOp64(*this, &access, op, value.reg, mem, temp.reg, output.reg);
   1738 }
   1739 
   1740 void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
   1741                                         AtomicOp op, Register64 value,
   1742                                         const BaseIndex& mem, Register64 temp,
   1743                                         Register64 output) {
   1744  AtomicFetchOp64(*this, &access, op, value.reg, mem, temp.reg, output.reg);
   1745 }
   1746 
   1747 template <typename T>
   1748 static void AtomicEffectOp64(MacroAssembler& masm,
   1749                             const wasm::MemoryAccessDesc* access, AtomicOp op,
   1750                             Register value, const T& mem) {
   1751  if (access) {
   1752    masm.append(*access, wasm::TrapMachineInsn::Atomic,
   1753                FaultingCodeOffset(masm.currentOffset()));
   1754  }
   1755  switch (op) {
   1756    case AtomicOp::Add:
   1757      masm.lock_addq(value, Operand(mem));
   1758      break;
   1759    case AtomicOp::Sub:
   1760      masm.lock_subq(value, Operand(mem));
   1761      break;
   1762    case AtomicOp::And:
   1763      masm.lock_andq(value, Operand(mem));
   1764      break;
   1765    case AtomicOp::Or:
   1766      masm.lock_orq(value, Operand(mem));
   1767      break;
   1768    case AtomicOp::Xor:
   1769      masm.lock_xorq(value, Operand(mem));
   1770      break;
   1771    default:
   1772      MOZ_CRASH();
   1773  }
   1774 }
   1775 
   1776 void MacroAssembler::wasmAtomicEffectOp64(const wasm::MemoryAccessDesc& access,
   1777                                          AtomicOp op, Register64 value,
   1778                                          const BaseIndex& mem) {
   1779  AtomicEffectOp64(*this, &access, op, value.reg, mem);
   1780 }
   1781 
   1782 void MacroAssembler::compareExchange64(Synchronization, const Address& mem,
   1783                                       Register64 expected,
   1784                                       Register64 replacement,
   1785                                       Register64 output) {
   1786  // NOTE: the generated code must match the assembly code in gen_cmpxchg in
   1787  // GenerateAtomicOperations.py
   1788  MOZ_ASSERT(output.reg == rax);
   1789  if (expected != output) {
   1790    movq(expected.reg, output.reg);
   1791  }
   1792  lock_cmpxchgq(replacement.reg, Operand(mem));
   1793 }
   1794 
   1795 void MacroAssembler::compareExchange64(Synchronization, const BaseIndex& mem,
   1796                                       Register64 expected,
   1797                                       Register64 replacement,
   1798                                       Register64 output) {
   1799  MOZ_ASSERT(output.reg == rax);
   1800  if (expected != output) {
   1801    movq(expected.reg, output.reg);
   1802  }
   1803  lock_cmpxchgq(replacement.reg, Operand(mem));
   1804 }
   1805 
   1806 void MacroAssembler::atomicExchange64(Synchronization, const Address& mem,
   1807                                      Register64 value, Register64 output) {
   1808  // NOTE: the generated code must match the assembly code in gen_exchange in
   1809  // GenerateAtomicOperations.py
   1810  if (value != output) {
   1811    movq(value.reg, output.reg);
   1812  }
   1813  xchgq(output.reg, Operand(mem));
   1814 }
   1815 
   1816 void MacroAssembler::atomicExchange64(Synchronization, const BaseIndex& mem,
   1817                                      Register64 value, Register64 output) {
   1818  if (value != output) {
   1819    movq(value.reg, output.reg);
   1820  }
   1821  xchgq(output.reg, Operand(mem));
   1822 }
   1823 
   1824 void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
   1825                                     Register64 value, const Address& mem,
   1826                                     Register64 temp, Register64 output) {
   1827  AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
   1828 }
   1829 
   1830 void MacroAssembler::atomicFetchOp64(Synchronization sync, AtomicOp op,
   1831                                     Register64 value, const BaseIndex& mem,
   1832                                     Register64 temp, Register64 output) {
   1833  AtomicFetchOp64(*this, nullptr, op, value.reg, mem, temp.reg, output.reg);
   1834 }
   1835 
   1836 void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
   1837                                      Register64 value, const Address& mem) {
   1838  AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
   1839 }
   1840 
   1841 void MacroAssembler::atomicEffectOp64(Synchronization sync, AtomicOp op,
   1842                                      Register64 value, const BaseIndex& mem) {
   1843  AtomicEffectOp64(*this, nullptr, op, value.reg, mem);
   1844 }
   1845 
   1846 CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
   1847  return leaRipRelative(dest);
   1848 }
   1849 
   1850 void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
   1851                                          CodeLocationLabel target) {
   1852  ptrdiff_t off = target - loc;
   1853  MOZ_ASSERT(off > ptrdiff_t(INT32_MIN));
   1854  MOZ_ASSERT(off < ptrdiff_t(INT32_MAX));
   1855  PatchWrite_Imm32(loc, Imm32(off));
   1856 }
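
        // The 32-bit displacement of a rip-relative instruction is measured
        // from the end of that instruction, so writing `target - loc` as the
        // immediate assumes loc denotes the first byte after the leaq emitted
        // by moveNearAddressWithPatch; the asserts keep the span within the
        // signed 32-bit (+/-2 GiB) reach of rip-relative addressing.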
   1857 
   1858 void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
   1859                                       Register64 boundsCheckLimit,
   1860                                       Label* label) {
   1861  cmpPtr(index.reg, boundsCheckLimit.reg);
   1862  j(cond, label);
   1863  if (JitOptions.spectreIndexMasking) {
   1864    cmovCCq(cond, Operand(boundsCheckLimit.reg), index.reg);
   1865  }
   1866 }
   1867 
   1868 void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
   1869                                       Address boundsCheckLimit, Label* label) {
   1870  cmpPtr(index.reg, Operand(boundsCheckLimit));
   1871  j(cond, label);
   1872  if (JitOptions.spectreIndexMasking) {
   1873    cmovCCq(cond, Operand(boundsCheckLimit), index.reg);
   1874  }
   1875 }
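
        // The Spectre mitigation: if the trap branch above is mispredicted as
        // not-taken for an out-of-range index, the cmov still fires on the
        // fall-through path and clamps the index to the limit, so speculative
        // execution cannot address memory past the bounds-check limit.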
   1876 
   1877 void MacroAssembler::wasmMarkCallAsSlow() {
   1878  static_assert(InstanceReg == r14);
   1879  orPtr(Imm32(0), r14);
   1880 }
   1881 
   1882 const int32_t SlowCallMarker = 0x00ce8349;  // OR r14, 0
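        // Byte by byte: REX.W+B = 0x49, opcode 0x83 /1 (OR r/m64, imm8),
        // ModRM 0xce (mod=11, /1, rm=r14), imm8 0x00 -- i.e. 49 83 ce 00,
        // which reads little-endian as the int32 0x00ce8349 above.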
   1883 
   1884 void MacroAssembler::wasmCheckSlowCallsite(Register ra, Label* notSlow,
   1885                                           Register temp1, Register temp2) {
   1886  // Check whether the return address holds the slow-call marker.
   1887  cmp32(Address(ra, 0), Imm32(SlowCallMarker));
   1888  j(Assembler::NotEqual, notSlow);
   1889 }
   1890 
   1891 //}}} check_macroassembler_style
   1892 
   1893 // ========================================================================
   1894 // Integer compare-then-conditionally-load/move operations.
   1895 
   1896 // cmpMove, Cond-Reg-Reg-Reg-Reg cases
   1897 
   1898 template <size_t CmpSize, size_t MoveSize>
   1899 void MacroAssemblerX64::cmpMove(Condition cond, Register lhs, Register rhs,
   1900                                Register falseVal, Register trueValAndDest) {
   1901  if constexpr (CmpSize == 32) {
   1902    cmp32(lhs, rhs);
   1903  } else {
   1904    static_assert(CmpSize == 64);
   1905    cmpPtr(lhs, rhs);
   1906  }
   1907  if constexpr (MoveSize == 32) {
   1908    cmovCCl(cond, Operand(falseVal), trueValAndDest);
   1909  } else {
   1910    static_assert(MoveSize == 64);
   1911    cmovCCq(cond, Operand(falseVal), trueValAndDest);
   1912  }
   1913 }
   1914 template void MacroAssemblerX64::cmpMove<32, 32>(Condition cond, Register lhs,
   1915                                                 Register rhs,
   1916                                                 Register falseVal,
   1917                                                 Register trueValAndDest);
   1918 template void MacroAssemblerX64::cmpMove<32, 64>(Condition cond, Register lhs,
   1919                                                 Register rhs,
   1920                                                 Register falseVal,
   1921                                                 Register trueValAndDest);
   1922 template void MacroAssemblerX64::cmpMove<64, 32>(Condition cond, Register lhs,
   1923                                                 Register rhs,
   1924                                                 Register falseVal,
   1925                                                 Register trueValAndDest);
   1926 template void MacroAssemblerX64::cmpMove<64, 64>(Condition cond, Register lhs,
   1927                                                 Register rhs,
   1928                                                 Register falseVal,
   1929                                                 Register trueValAndDest);
   1930 
   1931 // cmpMove, Cond-Reg-Addr-Reg-Reg cases
   1932 
   1933 template <size_t CmpSize, size_t MoveSize>
   1934 void MacroAssemblerX64::cmpMove(Condition cond, Register lhs,
   1935                                const Address& rhs, Register falseVal,
   1936                                Register trueValAndDest) {
   1937  if constexpr (CmpSize == 32) {
   1938    cmp32(lhs, Operand(rhs));
   1939  } else {
   1940    static_assert(CmpSize == 64);
   1941    cmpPtr(lhs, Operand(rhs));
   1942  }
   1943  if constexpr (MoveSize == 32) {
   1944    cmovCCl(cond, Operand(falseVal), trueValAndDest);
   1945  } else {
   1946    static_assert(MoveSize == 64);
   1947    cmovCCq(cond, Operand(falseVal), trueValAndDest);
   1948  }
   1949 }
   1950 template void MacroAssemblerX64::cmpMove<32, 32>(Condition cond, Register lhs,
   1951                                                 const Address& rhs,
   1952                                                 Register falseVal,
   1953                                                 Register trueValAndDest);
   1954 template void MacroAssemblerX64::cmpMove<32, 64>(Condition cond, Register lhs,
   1955                                                 const Address& rhs,
   1956                                                 Register falseVal,
   1957                                                 Register trueValAndDest);
   1958 template void MacroAssemblerX64::cmpMove<64, 32>(Condition cond, Register lhs,
   1959                                                 const Address& rhs,
   1960                                                 Register falseVal,
   1961                                                 Register trueValAndDest);
   1962 template void MacroAssemblerX64::cmpMove<64, 64>(Condition cond, Register lhs,
   1963                                                 const Address& rhs,
   1964                                                 Register falseVal,
   1965                                                 Register trueValAndDest);
   1966 
   1967 // cmpLoad, Cond-Reg-Reg-Addr-Reg cases
   1968 
   1969 template <size_t CmpSize, size_t LoadSize>
   1970 void MacroAssemblerX64::cmpLoad(Condition cond, Register lhs, Register rhs,
   1971                                const Address& falseVal,
   1972                                Register trueValAndDest) {
   1973  if constexpr (CmpSize == 32) {
   1974    cmp32(lhs, rhs);
   1975  } else {
   1976    static_assert(CmpSize == 64);
   1977    cmpPtr(lhs, rhs);
   1978  }
   1979  if constexpr (LoadSize == 32) {
   1980    cmovCCl(cond, Operand(falseVal), trueValAndDest);
   1981  } else {
   1982    static_assert(LoadSize == 64);
   1983    cmovCCq(cond, Operand(falseVal), trueValAndDest);
   1984  }
   1985 }
   1986 template void MacroAssemblerX64::cmpLoad<32, 32>(Condition cond, Register lhs,
   1987                                                 Register rhs,
   1988                                                 const Address& falseVal,
   1989                                                 Register trueValAndDest);
   1990 template void MacroAssemblerX64::cmpLoad<32, 64>(Condition cond, Register lhs,
   1991                                                 Register rhs,
   1992                                                 const Address& falseVal,
   1993                                                 Register trueValAndDest);
   1994 template void MacroAssemblerX64::cmpLoad<64, 32>(Condition cond, Register lhs,
   1995                                                 Register rhs,
   1996                                                 const Address& falseVal,
   1997                                                 Register trueValAndDest);
   1998 template void MacroAssemblerX64::cmpLoad<64, 64>(Condition cond, Register lhs,
   1999                                                 Register rhs,
   2000                                                 const Address& falseVal,
   2001                                                 Register trueValAndDest);
   2002 
   2003 // cmpLoad, Cond-Reg-Addr-Addr-Reg cases
   2004 
   2005 template <size_t CmpSize, size_t LoadSize>
   2006 void MacroAssemblerX64::cmpLoad(Condition cond, Register lhs,
   2007                                const Address& rhs, const Address& falseVal,
   2008                                Register trueValAndDest) {
   2009  if constexpr (CmpSize == 32) {
   2010    cmp32(lhs, Operand(rhs));
   2011  } else {
   2012    static_assert(CmpSize == 64);
   2013    cmpPtr(lhs, Operand(rhs));
   2014  }
   2015  if constexpr (LoadSize == 32) {
   2016    cmovCCl(cond, Operand(falseVal), trueValAndDest);
   2017  } else {
   2018    static_assert(LoadSize == 64);
   2019    cmovCCq(cond, Operand(falseVal), trueValAndDest);
   2020  }
   2021 }
   2022 template void MacroAssemblerX64::cmpLoad<32, 32>(Condition cond, Register lhs,
   2023                                                 const Address& rhs,
   2024                                                 const Address& falseVal,
   2025                                                 Register trueValAndDest);
   2026 template void MacroAssemblerX64::cmpLoad<32, 64>(Condition cond, Register lhs,
   2027                                                 const Address& rhs,
   2028                                                 const Address& falseVal,
   2029                                                 Register trueValAndDest);
   2030 template void MacroAssemblerX64::cmpLoad<64, 32>(Condition cond, Register lhs,
   2031                                                 const Address& rhs,
   2032                                                 const Address& falseVal,
   2033                                                 Register trueValAndDest);
   2034 template void MacroAssemblerX64::cmpLoad<64, 64>(Condition cond, Register lhs,
   2035                                                 const Address& rhs,
   2036                                                 const Address& falseVal,
   2037                                                 Register trueValAndDest);
   2038 
   2039 void MacroAssemblerX64::minMax32(Register lhs, Register rhs, Register dest,
   2040                                 bool isMax) {
   2041  if (rhs == dest) {
   2042    std::swap(lhs, rhs);
   2043  }
   2044 
   2045  auto cond = isMax ? Assembler::GreaterThan : Assembler::LessThan;
   2046  if (lhs != dest) {
   2047    movl(lhs, dest);
   2048  }
   2049  cmpl(lhs, rhs);
   2050  cmovCCl(cond, rhs, dest);
   2051 }
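
        // A minimal C sketch of the selection above (the raw AT&T-order
        // cmpl(lhs, rhs) computes rhs - lhs, so the condition reads rhs-first):
        //
        //   int32_t dest = lhs;
        //   if (isMax ? rhs > lhs : rhs < lhs)   // cmovCCl, branchless
        //     dest = rhs;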
   2052 
   2053 void MacroAssemblerX64::minMax32(Register lhs, Imm32 rhs, Register dest,
   2054                                 bool isMax) {
   2055  ScratchRegisterScope scratch(asMasm());
   2056 
   2057  if (rhs.value == 0) {
   2058    if (isMax) {
   2059      // dest = ~(lhs >> 31) & lhs
   2060      if (HasBMI1()) {
   2061        movl(lhs, scratch);
   2062        sarl(Imm32(31), scratch);
   2063        andnl(scratch, lhs, dest);
   2064      } else {
   2065        if (lhs != dest) {
   2066          movl(lhs, dest);
   2067        }
   2068        movl(lhs, scratch);
   2069        sarl(Imm32(31), scratch);
   2070        notl(scratch);
   2071        andl(scratch, dest);
   2072      }
   2073    } else {
   2074      // dest = (lhs >> 31) & lhs
   2075      if (lhs != dest) {
   2076        movl(lhs, dest);
   2077      }
   2078      movl(lhs, scratch);
   2079      sarl(Imm32(31), scratch);
   2080      andl(scratch, dest);
   2081    }
   2082    return;
   2083  }
   2084 
   2085  auto cond = isMax ? Assembler::LessThan : Assembler::GreaterThan;
   2086  move32(rhs, scratch);
   2087  if (lhs != dest) {
   2088    movl(lhs, dest);
   2089  }
   2090  cmpl(rhs, lhs);
   2091  cmovCCl(cond, scratch, dest);
   2092 }
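
        // The rhs == 0 fast paths rely on the arithmetic-shift mask identity
        // (x >> 31 is all-ones iff x is negative):
        //
        //   max(x, 0) == ~(x >> 31) & x   // keep x only when non-negative
        //   min(x, 0) ==  (x >> 31) & x   // keep x only when negative
        //
        // minMaxPtr below applies the same identities with a 63-bit shift.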
   2093 
   2094 void MacroAssemblerX64::minMaxPtr(Register lhs, Register rhs, Register dest,
   2095                                  bool isMax) {
   2096  if (rhs == dest) {
   2097    std::swap(lhs, rhs);
   2098  }
   2099 
   2100  auto cond = isMax ? Assembler::GreaterThan : Assembler::LessThan;
   2101  if (lhs != dest) {
   2102    movq(lhs, dest);
   2103  }
   2104  cmpq(lhs, rhs);
   2105  cmovCCq(cond, rhs, dest);
   2106 }
   2107 
   2108 void MacroAssemblerX64::minMaxPtr(Register lhs, ImmWord rhs, Register dest,
   2109                                  bool isMax) {
   2110  ScratchRegisterScope scratch(asMasm());
   2111 
   2112  if (rhs.value == 0) {
   2113    if (isMax) {
   2114      // dest = ~(lhs >> 63) & lhs
   2115      if (HasBMI1()) {
   2116        movq(lhs, scratch);
   2117        sarq(Imm32(63), scratch);
   2118        andnq(scratch, lhs, dest);
   2119      } else {
   2120        if (lhs != dest) {
   2121          movq(lhs, dest);
   2122        }
   2123        movq(lhs, scratch);
   2124        sarq(Imm32(63), scratch);
   2125        notq(scratch);
   2126        andq(scratch, dest);
   2127      }
   2128    } else {
   2129      // dest = (lhs >> 63) & lhs
   2130      if (lhs != dest) {
   2131        movq(lhs, dest);
   2132      }
   2133      movq(lhs, scratch);
   2134      sarq(Imm32(63), scratch);
   2135      andq(scratch, dest);
   2136    }
   2137    return;
   2138  }
   2139 
   2140  auto cond = isMax ? Assembler::LessThan : Assembler::GreaterThan;
   2141  movePtr(rhs, scratch);
   2142  if (lhs != dest) {
   2143    movq(lhs, dest);
   2144  }
   2145  if (intptr_t(rhs.value) <= INT32_MAX && intptr_t(rhs.value) >= INT32_MIN) {
   2146    cmpq(Imm32(int32_t(rhs.value)), lhs);
   2147  } else {
   2148    cmpq(scratch, lhs);
   2149  }
   2150  cmovCCq(cond, scratch, dest);
   2151 }
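
        // cmpq takes at most a sign-extended 32-bit immediate (x64 has no
        // cmp r64, imm64 form), hence the range test above: constants outside
        // the int32 range must be compared through the scratch register.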