tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

BaseAssembler-x64.h (55693B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
      2 * vim: set ts=8 sts=2 et sw=2 tw=80:
      3 * This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #ifndef jit_x64_BaseAssembler_x64_h
      8 #define jit_x64_BaseAssembler_x64_h
      9 
     10 #include "jit/x86-shared/BaseAssembler-x86-shared.h"
     11 
     12 namespace js {
     13 namespace jit {
     14 
     15 namespace X86Encoding {
     16 
     17 class BaseAssemblerX64 : public BaseAssembler {
     18 public:
     19  // Arithmetic operations:
     20 
 // 64-bit integer add (addq). All variants route through the *Op64
 // formatter entry points, i.e. the 64-bit operand-size encodings.

 // dst += src.
 void addq_rr(RegisterID src, RegisterID dst) {
   spew("addq       %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_ADD_GvEv, src, dst);
 }

 // dst += [base + offset].
 void addq_mr(int32_t offset, RegisterID base, RegisterID dst) {
   spew("addq       " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_ADD_GvEv, offset, base, dst);
 }

 // dst += [addr] (absolute address).
 void addq_mr(const void* addr, RegisterID dst) {
   spew("addq       %p, %s", addr, GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_ADD_GvEv, addr, dst);
 }

 // dst += [base + index * scale + offset].
 void addq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
              RegisterID dst) {
   spew("addq       " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
        GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_ADD_GvEv, offset, base, index, scale, dst);
 }

 // [base + offset] += src.
 void addq_rm(RegisterID src, int32_t offset, RegisterID base) {
   spew("addq       %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
   m_formatter.oneByteOp64(OP_ADD_EvGv, offset, base, src);
 }

 // [base + index * scale + offset] += src.
 void addq_rm(RegisterID src, int32_t offset, RegisterID base,
              RegisterID index, int scale) {
   spew("addq       %s, " MEM_obs, GPReg64Name(src),
        ADDR_obs(offset, base, index, scale));
   m_formatter.oneByteOp64(OP_ADD_EvGv, offset, base, index, scale, src);
 }

 // dst += imm. Picks the shortest encoding: the sign-extended 8-bit
 // immediate form when imm fits, else the short rax-only opcode or the
 // general group-1 form with a 32-bit immediate.
 void addq_ir(int32_t imm, RegisterID dst) {
   spew("addq       $%d, %s", imm, GPReg64Name(dst));
   if (CAN_SIGN_EXTEND_8_32(imm)) {
     m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_ADD);
     m_formatter.immediate8s(imm);
   } else {
     if (dst == rax) {
       m_formatter.oneByteOp64(OP_ADD_EAXIv);
     } else {
       m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
     }
     m_formatter.immediate32(imm);
   }
 }

 // dst += imm, always with a full 32-bit immediate field so the value
 // can be patched in place after assembly.
 void addq_i32r(int32_t imm, RegisterID dst) {
   // 32-bit immediate always, for patching.
   spew("addq       $0x%04x, %s", uint32_t(imm), GPReg64Name(dst));
   if (dst == rax) {
     m_formatter.oneByteOp64(OP_ADD_EAXIv);
   } else {
     m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_ADD);
   }
   m_formatter.immediate32(imm);
 }

 // [base + offset] += imm, with 8-/32-bit immediate selection.
 void addq_im(int32_t imm, int32_t offset, RegisterID base) {
   spew("addq       $%d, " MEM_ob, imm, ADDR_ob(offset, base));
   if (CAN_SIGN_EXTEND_8_32(imm)) {
     m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_ADD);
     m_formatter.immediate8s(imm);
   } else {
     m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_ADD);
     m_formatter.immediate32(imm);
   }
 }

 // [addr] += imm (absolute address), with 8-/32-bit immediate selection.
 void addq_im(int32_t imm, const void* addr) {
   spew("addq       $%d, %p", imm, addr);
   if (CAN_SIGN_EXTEND_8_32(imm)) {
     m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_ADD);
     m_formatter.immediate8s(imm);
   } else {
     m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_ADD);
     m_formatter.immediate32(imm);
   }
 }
    102 
 // 64-bit bitwise AND (andq), register and memory operand forms.

 // dst &= src.
 void andq_rr(RegisterID src, RegisterID dst) {
   spew("andq       %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_AND_GvEv, src, dst);
 }

 // dst &= [base + offset].
 void andq_mr(int32_t offset, RegisterID base, RegisterID dst) {
   spew("andq       " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, dst);
 }

 // dst &= [base + index * scale + offset].
 void andq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
              RegisterID dst) {
   spew("andq       " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
        GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_AND_GvEv, offset, base, index, scale, dst);
 }

 // dst &= [addr] (absolute address).
 void andq_mr(const void* addr, RegisterID dst) {
   spew("andq       %p, %s", addr, GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_AND_GvEv, addr, dst);
 }

 // [base + offset] &= src.
 void andq_rm(RegisterID src, int32_t offset, RegisterID base) {
   spew("andq       %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
   m_formatter.oneByteOp64(OP_AND_EvGv, offset, base, src);
 }

 // [base + index * scale + offset] &= src.
 void andq_rm(RegisterID src, int32_t offset, RegisterID base,
              RegisterID index, int scale) {
   spew("andq       %s, " MEM_obs, GPReg64Name(src),
        ADDR_obs(offset, base, index, scale));
   m_formatter.oneByteOp64(OP_AND_EvGv, offset, base, index, scale, src);
 }
    136 
 // 64-bit bitwise OR (orq), memory-to-register forms.

 // dst |= [base + offset].
 void orq_mr(int32_t offset, RegisterID base, RegisterID dst) {
   spew("orq        " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_OR_GvEv, offset, base, dst);
 }

 // dst |= [addr] (absolute address).
 void orq_mr(const void* addr, RegisterID dst) {
   spew("orq        %p, %s", addr, GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_OR_GvEv, addr, dst);
 }
    146 
    147  void orq_rm(RegisterID src, int32_t offset, RegisterID base) {
    148    spew("orq       %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
    149    m_formatter.oneByteOp64(OP_OR_EvGv, offset, base, src);
    150  }
    151 
    152  void orq_rm(RegisterID src, int32_t offset, RegisterID base, RegisterID index,
    153              int scale) {
    154    spew("orq       %s, " MEM_obs, GPReg64Name(src),
    155         ADDR_obs(offset, base, index, scale));
    156    m_formatter.oneByteOp64(OP_OR_EvGv, offset, base, index, scale, src);
    157  }
    158 
 // 64-bit bitwise XOR (xorq), memory operand forms.

 // dst ^= [base + offset].
 void xorq_mr(int32_t offset, RegisterID base, RegisterID dst) {
   spew("xorq       " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_XOR_GvEv, offset, base, dst);
 }

 // dst ^= [base + index * scale + offset].
 void xorq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
              RegisterID dst) {
   spew("xorq       " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
        GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_XOR_GvEv, offset, base, index, scale, dst);
 }

 // dst ^= [addr] (absolute address).
 void xorq_mr(const void* addr, RegisterID dst) {
   spew("xorq       %p, %s", addr, GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_XOR_GvEv, addr, dst);
 }

 // [base + offset] ^= src.
 void xorq_rm(RegisterID src, int32_t offset, RegisterID base) {
   spew("xorq       %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
   m_formatter.oneByteOp64(OP_XOR_EvGv, offset, base, src);
 }

 // [base + index * scale + offset] ^= src.
 void xorq_rm(RegisterID src, int32_t offset, RegisterID base,
              RegisterID index, int scale) {
   spew("xorq       %s, " MEM_obs, GPReg64Name(src),
        ADDR_obs(offset, base, index, scale));
   m_formatter.oneByteOp64(OP_XOR_EvGv, offset, base, index, scale, src);
 }
    187 
 // 64-bit byte-swap and bit scan/count instructions (two-byte 0F map).

 // Reverse the byte order of dst.
 void bswapq_r(RegisterID dst) {
   spew("bswapq     %s", GPReg64Name(dst));
   m_formatter.twoByteOp64(OP2_BSWAP, dst);
 }

 // Bit scan reverse: dst = index of highest set bit in src.
 void bsrq_rr(RegisterID src, RegisterID dst) {
   spew("bsrq       %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.twoByteOp64(OP2_BSR_GvEv, src, dst);
 }

 // Bit scan forward: dst = index of lowest set bit in src.
 void bsfq_rr(RegisterID src, RegisterID dst) {
   spew("bsfq       %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.twoByteOp64(OP2_BSF_GvEv, src, dst);
 }

 // Count leading zeros. legacySSEPrefix(VEX_SS) emits the mandatory
 // prefix that distinguishes lzcnt from bsr on the same opcode map.
 void lzcntq_rr(RegisterID src, RegisterID dst) {
   spew("lzcntq     %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.legacySSEPrefix(VEX_SS);
   m_formatter.twoByteOp64(OP2_LZCNT_GvEv, src, dst);
 }

 // Count trailing zeros; same prefix scheme as lzcntq above.
 void tzcntq_rr(RegisterID src, RegisterID dst) {
   spew("tzcntq     %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.legacySSEPrefix(VEX_SS);
   m_formatter.twoByteOp64(OP2_TZCNT_GvEv, src, dst);
 }

 // Population count (number of set bits); same prefix scheme.
 void popcntq_rr(RegisterID src, RegisterID dst) {
   spew("popcntq    %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.legacySSEPrefix(VEX_SS);
   m_formatter.twoByteOp64(OP2_POPCNT_GvEv, src, dst);
 }
    220 
 // dst &= imm. Uses the sign-extended 8-bit immediate form when the
 // value fits, otherwise the short rax-only opcode or the general
 // group-1 form with a 32-bit immediate.
 void andq_ir(int32_t imm, RegisterID dst) {
   spew("andq       $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst));
   if (CAN_SIGN_EXTEND_8_32(imm)) {
     m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_AND);
     m_formatter.immediate8s(imm);
   } else {
     if (dst == rax) {
       m_formatter.oneByteOp64(OP_AND_EAXIv);
     } else {
       m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_AND);
     }
     m_formatter.immediate32(imm);
   }
 }

 // dst = -dst (two's-complement negate, group-3 /NEG).
 void negq_r(RegisterID dst) {
   spew("negq       %s", GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NEG);
 }

 // dst |= src.
 void orq_rr(RegisterID src, RegisterID dst) {
   spew("orq        %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_OR_GvEv, src, dst);
 }

 // dst |= imm; same immediate-size selection as andq_ir above.
 void orq_ir(int32_t imm, RegisterID dst) {
   spew("orq        $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst));
   if (CAN_SIGN_EXTEND_8_32(imm)) {
     m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_OR);
     m_formatter.immediate8s(imm);
   } else {
     if (dst == rax) {
       m_formatter.oneByteOp64(OP_OR_EAXIv);
     } else {
       m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_OR);
     }
     m_formatter.immediate32(imm);
   }
 }

 // dst = ~dst (bitwise complement, group-3 /NOT).
 void notq_r(RegisterID dst) {
   spew("notq       %s", GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_GROUP3_Ev, dst, GROUP3_OP_NOT);
 }
    265 
 // 64-bit integer subtract (subq), register and memory operand forms.

 // dst -= src.
 void subq_rr(RegisterID src, RegisterID dst) {
   spew("subq       %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_SUB_GvEv, src, dst);
 }

 // [base + offset] -= src.
 void subq_rm(RegisterID src, int32_t offset, RegisterID base) {
   spew("subq       %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
   m_formatter.oneByteOp64(OP_SUB_EvGv, offset, base, src);
 }

 // [base + index * scale + offset] -= src.
 void subq_rm(RegisterID src, int32_t offset, RegisterID base,
              RegisterID index, int scale) {
   spew("subq       %s, " MEM_obs, GPReg64Name(src),
        ADDR_obs(offset, base, index, scale));
   m_formatter.oneByteOp64(OP_SUB_EvGv, offset, base, index, scale, src);
 }

 // dst -= [base + offset].
 void subq_mr(int32_t offset, RegisterID base, RegisterID dst) {
   spew("subq       " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_SUB_GvEv, offset, base, dst);
 }

 // dst -= [addr] (absolute address).
 void subq_mr(const void* addr, RegisterID dst) {
   spew("subq       %p, %s", addr, GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_SUB_GvEv, addr, dst);
 }
    292 
    293  void subq_ir(int32_t imm, RegisterID dst) {
    294    spew("subq       $%d, %s", imm, GPReg64Name(dst));
    295    if (CAN_SIGN_EXTEND_8_32(imm)) {
    296      m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_SUB);
    297      m_formatter.immediate8s(imm);
    298    } else {
    299      if (dst == rax) {
    300        m_formatter.oneByteOp64(OP_SUB_EAXIv);
    301      } else {
    302        m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_SUB);
    303      }
    304      m_formatter.immediate32(imm);
    305    }
    306  }
    307 
 // dst ^= src.
 void xorq_rr(RegisterID src, RegisterID dst) {
   spew("xorq       %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_XOR_GvEv, src, dst);
 }

 // dst ^= imm. Sign-extended 8-bit immediate when imm fits, otherwise
 // the rax short form or the general 32-bit-immediate group-1 form.
 void xorq_ir(int32_t imm, RegisterID dst) {
   spew("xorq       $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst));
   if (CAN_SIGN_EXTEND_8_32(imm)) {
     m_formatter.oneByteOp64(OP_GROUP1_EvIb, dst, GROUP1_OP_XOR);
     m_formatter.immediate8s(imm);
   } else {
     if (dst == rax) {
       m_formatter.oneByteOp64(OP_XOR_EAXIv);
     } else {
       m_formatter.oneByteOp64(OP_GROUP1_EvIz, dst, GROUP1_OP_XOR);
     }
     m_formatter.immediate32(imm);
   }
 }
    327 
 // 64-bit shifts and rotates (group 2). The _CLr forms shift by the
 // count in %cl; the _ir forms shift by a constant and use the
 // dedicated shift-by-1 opcode when imm == 1, which saves the
 // immediate byte.

 // Arithmetic right shift by %cl.
 void sarq_CLr(RegisterID dst) {
   spew("sarq       %%cl, %s", GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SAR);
 }

 // Left shift by %cl.
 void shlq_CLr(RegisterID dst) {
   spew("shlq       %%cl, %s", GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHL);
 }

 // Logical right shift by %cl.
 void shrq_CLr(RegisterID dst) {
   spew("shrq       %%cl, %s", GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_SHR);
 }

 // Arithmetic right shift by a constant (0 <= imm < 64).
 void sarq_ir(int32_t imm, RegisterID dst) {
   MOZ_ASSERT(imm < 64);
   spew("sarq       $%d, %s", imm, GPReg64Name(dst));
   if (imm == 1) {
     m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SAR);
   } else {
     m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SAR);
     m_formatter.immediate8u(imm);
   }
 }

 // Left shift by a constant (0 <= imm < 64).
 void shlq_ir(int32_t imm, RegisterID dst) {
   MOZ_ASSERT(imm < 64);
   spew("shlq       $%d, %s", imm, GPReg64Name(dst));
   if (imm == 1) {
     m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHL);
   } else {
     m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHL);
     m_formatter.immediate8u(imm);
   }
 }

 // Logical right shift by a constant (0 <= imm < 64).
 void shrq_ir(int32_t imm, RegisterID dst) {
   MOZ_ASSERT(imm < 64);
   spew("shrq       $%d, %s", imm, GPReg64Name(dst));
   if (imm == 1) {
     m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_SHR);
   } else {
     m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_SHR);
     m_formatter.immediate8u(imm);
   }
 }

 // Rotate left by a constant (0 <= imm < 64).
 void rolq_ir(int32_t imm, RegisterID dst) {
   MOZ_ASSERT(imm < 64);
   spew("rolq       $%d, %s", imm, GPReg64Name(dst));
   if (imm == 1) {
     m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROL);
   } else {
     m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROL);
     m_formatter.immediate8u(imm);
   }
 }
 // Rotate left by %cl.
 void rolq_CLr(RegisterID dst) {
   spew("rolq       %%cl, %s", GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROL);
 }

 // Rotate right by a constant (0 <= imm < 64).
 void rorq_ir(int32_t imm, RegisterID dst) {
   MOZ_ASSERT(imm < 64);
   spew("rorq       $%d, %s", imm, GPReg64Name(dst));
   if (imm == 1) {
     m_formatter.oneByteOp64(OP_GROUP2_Ev1, dst, GROUP2_OP_ROR);
   } else {
     m_formatter.oneByteOp64(OP_GROUP2_EvIb, dst, GROUP2_OP_ROR);
     m_formatter.immediate8u(imm);
   }
 }
 // Rotate right by %cl.
 void rorq_CLr(RegisterID dst) {
   spew("rorq       %%cl, %s", GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_GROUP2_EvCL, dst, GROUP2_OP_ROR);
 }
    405 
 // 64-bit multiply and divide.

 // dst *= src (two-operand signed multiply, truncated result).
 void imulq_rr(RegisterID src, RegisterID dst) {
   spew("imulq      %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.twoByteOp64(OP2_IMUL_GvEv, src, dst);
 }

 // One-operand signed multiply (group 3): widening multiply of the
 // implicit rax operand by `multiplier`, per the x86 convention.
 void imulq_r(RegisterID multiplier) {
   spew("imulq      %s", GPReg64Name(multiplier));
   m_formatter.oneByteOp64(OP_GROUP3_Ev, multiplier, GROUP3_OP_IMUL);
 }

 // dst *= [base + offset].
 void imulq_mr(int32_t offset, RegisterID base, RegisterID dst) {
   spew("imulq      " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
   m_formatter.twoByteOp64(OP2_IMUL_GvEv, offset, base, dst);
 }

 // dst = src * value (three-operand imul). Uses the sign-extended
 // 8-bit immediate form when the constant fits.
 void imulq_ir(int32_t value, RegisterID src, RegisterID dst) {
   spew("imulq      $%d, %s, %s", value, GPReg64Name(src), GPReg64Name(dst));
   if (CAN_SIGN_EXTEND_8_32(value)) {
     m_formatter.oneByteOp64(OP_IMUL_GvEvIb, src, dst);
     m_formatter.immediate8s(value);
   } else {
     m_formatter.oneByteOp64(OP_IMUL_GvEvIz, src, dst);
     m_formatter.immediate32(value);
   }
 }

 // One-operand unsigned multiply (group 3), implicit rax operand.
 void mulq_r(RegisterID multiplier) {
   spew("mulq       %s", GPReg64Name(multiplier));
   m_formatter.oneByteOp64(OP_GROUP3_Ev, multiplier, GROUP3_OP_MUL);
 }

 // cqo: sign-extend rax into rdx:rax. Same opcode byte as cdq, emitted
 // through the 64-bit operand-size path.
 void cqo() {
   spew("cqo        ");
   m_formatter.oneByteOp64(OP_CDQ);
 }

 // Signed divide of the implicit rdx:rax pair by `divisor` (group 3).
 void idivq_r(RegisterID divisor) {
   spew("idivq      %s", GPReg64Name(divisor));
   m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_IDIV);
 }

 // Unsigned divide of the implicit rdx:rax pair by `divisor` (group 3).
 void divq_r(RegisterID divisor) {
   spew("divq       %s", GPReg64Name(divisor));
   m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_DIV);
 }
    451 
 // Comparisons:

 // Compare lhs against rhs (sets flags for lhs - rhs).
 void cmpq_rr(RegisterID rhs, RegisterID lhs) {
   spew("cmpq       %s, %s", GPReg64Name(rhs), GPReg64Name(lhs));
   m_formatter.oneByteOp64(OP_CMP_GvEv, rhs, lhs);
 }

 // Compare [base + offset] against rhs.
 void cmpq_rm(RegisterID rhs, int32_t offset, RegisterID base) {
   spew("cmpq       %s, " MEM_ob, GPReg64Name(rhs), ADDR_ob(offset, base));
   m_formatter.oneByteOp64(OP_CMP_EvGv, offset, base, rhs);
 }

 // Compare [base + index * scale + offset] against rhs.
 void cmpq_rm(RegisterID rhs, int32_t offset, RegisterID base,
              RegisterID index, int scale) {
   spew("cmpq       %s, " MEM_obs, GPReg64Name(rhs),
        ADDR_obs(offset, base, index, scale));
   m_formatter.oneByteOp64(OP_CMP_EvGv, offset, base, index, scale, rhs);
 }

 // Compare lhs against [base + offset].
 void cmpq_mr(int32_t offset, RegisterID base, RegisterID lhs) {
   spew("cmpq       " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(lhs));
   m_formatter.oneByteOp64(OP_CMP_GvEv, offset, base, lhs);
 }

 // Compare lhs against an immediate. Comparing against zero is
 // rewritten as testq lhs, lhs, which has a shorter encoding;
 // otherwise selects the 8-bit sign-extended, rax short-form, or
 // 32-bit group-1 encoding.
 void cmpq_ir(int32_t rhs, RegisterID lhs) {
   if (rhs == 0) {
     testq_rr(lhs, lhs);
     return;
   }

   spew("cmpq       $0x%" PRIx64 ", %s", uint64_t(rhs), GPReg64Name(lhs));
   if (CAN_SIGN_EXTEND_8_32(rhs)) {
     m_formatter.oneByteOp64(OP_GROUP1_EvIb, lhs, GROUP1_OP_CMP);
     m_formatter.immediate8s(rhs);
   } else {
     if (lhs == rax) {
       m_formatter.oneByteOp64(OP_CMP_EAXIv);
     } else {
       m_formatter.oneByteOp64(OP_GROUP1_EvIz, lhs, GROUP1_OP_CMP);
     }
     m_formatter.immediate32(rhs);
   }
 }

 // Compare [base + offset] against an immediate.
 void cmpq_im(int32_t rhs, int32_t offset, RegisterID base) {
   spew("cmpq       $0x%" PRIx64 ", " MEM_ob, uint64_t(rhs),
        ADDR_ob(offset, base));
   if (CAN_SIGN_EXTEND_8_32(rhs)) {
     m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, GROUP1_OP_CMP);
     m_formatter.immediate8s(rhs);
   } else {
     m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, GROUP1_OP_CMP);
     m_formatter.immediate32(rhs);
   }
 }

 // Compare [base + index * scale + offset] against an immediate.
 // NOTE(review): this overload spews the immediate as 32-bit hex
 // (uint32_t) while its siblings spew 64-bit hex — cosmetic
 // inconsistency in debug output only.
 void cmpq_im(int32_t rhs, int32_t offset, RegisterID base, RegisterID index,
              int scale) {
   spew("cmpq       $0x%x, " MEM_obs, uint32_t(rhs),
        ADDR_obs(offset, base, index, scale));
   if (CAN_SIGN_EXTEND_8_32(rhs)) {
     m_formatter.oneByteOp64(OP_GROUP1_EvIb, offset, base, index, scale,
                             GROUP1_OP_CMP);
     m_formatter.immediate8s(rhs);
   } else {
     m_formatter.oneByteOp64(OP_GROUP1_EvIz, offset, base, index, scale,
                             GROUP1_OP_CMP);
     m_formatter.immediate32(rhs);
   }
 }
 // Compare [addr] (absolute address) against an immediate.
 void cmpq_im(int32_t rhs, const void* addr) {
   spew("cmpq       $0x%" PRIx64 ", %p", uint64_t(rhs), addr);
   if (CAN_SIGN_EXTEND_8_32(rhs)) {
     m_formatter.oneByteOp64(OP_GROUP1_EvIb, addr, GROUP1_OP_CMP);
     m_formatter.immediate8s(rhs);
   } else {
     m_formatter.oneByteOp64(OP_GROUP1_EvIz, addr, GROUP1_OP_CMP);
     m_formatter.immediate32(rhs);
   }
 }
 // Compare [addr] (absolute address) against rhs.
 void cmpq_rm(RegisterID rhs, const void* addr) {
   spew("cmpq       %s, %p", GPReg64Name(rhs), addr);
   m_formatter.oneByteOp64(OP_CMP_EvGv, addr, rhs);
 }
    536 
 // testq: AND the operands and set flags, discarding the result.
 // For the EvGv form, lhs is the ModRM r/m operand and rhs the reg
 // operand — hence the (lhs, rhs) argument order below.
 void testq_rr(RegisterID rhs, RegisterID lhs) {
   spew("testq      %s, %s", GPReg64Name(rhs), GPReg64Name(lhs));
   m_formatter.oneByteOp64(OP_TEST_EvGv, lhs, rhs);
 }

 // Test lhs against an immediate mask. Note test has no
 // sign-extended-imm8 form, so the immediate is always 32 bits here.
 void testq_ir(int32_t rhs, RegisterID lhs) {
   // If the mask fits in a 32-bit immediate, we can use testl with a
   // 32-bit subreg.
   if (CAN_ZERO_EXTEND_32_64(rhs)) {
     testl_ir(rhs, lhs);
     return;
   }
   spew("testq      $0x%" PRIx64 ", %s", uint64_t(rhs), GPReg64Name(lhs));
   if (lhs == rax) {
     m_formatter.oneByteOp64(OP_TEST_EAXIv);
   } else {
     m_formatter.oneByteOp64(OP_GROUP3_EvIz, lhs, GROUP3_OP_TEST);
   }
   m_formatter.immediate32(rhs);
 }

 // Test [base + offset] against a 32-bit immediate mask.
 void testq_i32m(int32_t rhs, int32_t offset, RegisterID base) {
   spew("testq      $0x%" PRIx64 ", " MEM_ob, uint64_t(rhs),
        ADDR_ob(offset, base));
   m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, GROUP3_OP_TEST);
   m_formatter.immediate32(rhs);
 }

 // Test [base + index * scale + offset] against a 32-bit immediate mask.
 void testq_i32m(int32_t rhs, int32_t offset, RegisterID base,
                 RegisterID index, int scale) {
   spew("testq      $0x%4x, " MEM_obs, uint32_t(rhs),
        ADDR_obs(offset, base, index, scale));
   m_formatter.oneByteOp64(OP_GROUP3_EvIz, offset, base, index, scale,
                           GROUP3_OP_TEST);
   m_formatter.immediate32(rhs);
 }
    573 
 // Various move ops:

 // Conditional move: dst = src if `cond` holds; flags-driven, no branch.
 // cmovccOpcode maps the condition code to the matching 0F 4x opcode.
 void cmovCCq_rr(Condition cond, RegisterID src, RegisterID dst) {
   spew("cmov%s     %s, %s", CCName(cond), GPReg64Name(src), GPReg64Name(dst));
   m_formatter.twoByteOp64(cmovccOpcode(cond), src, dst);
 }
 // Conditional move from [base + offset].
 void cmovCCq_mr(Condition cond, int32_t offset, RegisterID base,
                 RegisterID dst) {
   spew("cmov%s     " MEM_ob ", %s", CCName(cond), ADDR_ob(offset, base),
        GPReg64Name(dst));
   m_formatter.twoByteOp64(cmovccOpcode(cond), offset, base, dst);
 }
 // Conditional move from [base + index * scale + offset].
 void cmovCCq_mr(Condition cond, int32_t offset, RegisterID base,
                 RegisterID index, int scale, RegisterID dst) {
   spew("cmov%s     " MEM_obs ", %s", CCName(cond),
        ADDR_obs(offset, base, index, scale), GPReg64Name(dst));
   m_formatter.twoByteOp64(cmovccOpcode(cond), offset, base, index, scale,
                           dst);
 }
    593 
 // 64-bit compare-and-exchange against [base + offset]. Note the opcode
 // constant is named OP2_CMPXCHG_GvEw upstream but is emitted here via
 // the 64-bit path; no LOCK prefix is added by this method.
 void cmpxchgq(RegisterID src, int32_t offset, RegisterID base) {
   spew("cmpxchgq   %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
   m_formatter.twoByteOp64(OP2_CMPXCHG_GvEw, offset, base, src);
 }

 // 64-bit compare-and-exchange against [base + index * scale + offset].
 void cmpxchgq(RegisterID src, int32_t offset, RegisterID base,
               RegisterID index, int scale) {
   spew("cmpxchgq   %s, " MEM_obs, GPReg64Name(src),
        ADDR_obs(offset, base, index, scale));
   m_formatter.twoByteOp64(OP2_CMPXCHG_GvEw, offset, base, index, scale, src);
 }

 // Atomic exchange-and-add: LOCK prefix followed by xadd, so the
 // read-modify-write of [base + offset] is atomic; srcdest receives the
 // old memory value.
 void lock_xaddq_rm(RegisterID srcdest, int32_t offset, RegisterID base) {
   spew("lock xaddq %s, " MEM_ob, GPReg64Name(srcdest), ADDR_ob(offset, base));
   m_formatter.oneByteOp(PRE_LOCK);
   m_formatter.twoByteOp64(OP2_XADD_EvGv, offset, base, srcdest);
 }

 // Atomic exchange-and-add, scaled-index addressing form.
 void lock_xaddq_rm(RegisterID srcdest, int32_t offset, RegisterID base,
                    RegisterID index, int scale) {
   spew("lock xaddq %s, " MEM_obs, GPReg64Name(srcdest),
        ADDR_obs(offset, base, index, scale));
   m_formatter.oneByteOp(PRE_LOCK);
   m_formatter.twoByteOp64(OP2_XADD_EvGv, offset, base, index, scale, srcdest);
 }
    619 
 // Exchange the two registers.
 void xchgq_rr(RegisterID src, RegisterID dst) {
   spew("xchgq      %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_XCHG_GvEv, src, dst);
 }
 // Exchange src with [base + offset]. No explicit LOCK prefix is
 // emitted by this method.
 void xchgq_rm(RegisterID src, int32_t offset, RegisterID base) {
   spew("xchgq      %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
   m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, src);
 }
 // Exchange src with [base + index * scale + offset].
 void xchgq_rm(RegisterID src, int32_t offset, RegisterID base,
               RegisterID index, int scale) {
   spew("xchgq      %s, " MEM_obs, GPReg64Name(src),
        ADDR_obs(offset, base, index, scale));
   m_formatter.oneByteOp64(OP_XCHG_GvEv, offset, base, index, scale, src);
 }
    634 
 // dst = src. Encoded with the EvGv (store-direction) opcode, so dst is
 // the ModRM r/m operand and src the reg operand — hence (dst, src).
 void movq_rr(RegisterID src, RegisterID dst) {
   spew("movq       %s, %s", GPReg64Name(src), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_MOV_EvGv, dst, src);
 }

 // [base + offset] = src.
 void movq_rm(RegisterID src, int32_t offset, RegisterID base) {
   spew("movq       %s, " MEM_ob, GPReg64Name(src), ADDR_ob(offset, base));
   m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, src);
 }

 // [base + offset] = src, forcing a 32-bit displacement so the offset
 // field has a fixed size (patchable).
 void movq_rm_disp32(RegisterID src, int32_t offset, RegisterID base) {
   spew("movq       %s, " MEM_o32b, GPReg64Name(src), ADDR_o32b(offset, base));
   m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, offset, base, src);
 }

 // [base + index * scale + offset] = src.
 void movq_rm(RegisterID src, int32_t offset, RegisterID base,
              RegisterID index, int scale) {
   spew("movq       %s, " MEM_obs, GPReg64Name(src),
        ADDR_obs(offset, base, index, scale));
   m_formatter.oneByteOp64(OP_MOV_EvGv, offset, base, index, scale, src);
 }

 // [addr] = src (absolute address). When src is rax and the address is
 // not representable as an immediate displacement (IsAddressImmediate),
 // fall back to the rax/moffs form in movq_EAXm.
 void movq_rm(RegisterID src, const void* addr) {
   if (src == rax && !IsAddressImmediate(addr)) {
     movq_EAXm(addr);
     return;
   }

   spew("movq       %s, %p", GPReg64Name(src), addr);
   m_formatter.oneByteOp64(OP_MOV_EvGv, addr, src);
 }

 // rax = [addr], using the moffs form with a full 64-bit address
 // immediate; delegates to the ModRM form when the address fits.
 void movq_mEAX(const void* addr) {
   if (IsAddressImmediate(addr)) {
     movq_mr(addr, rax);
     return;
   }

   spew("movq       %p, %%rax", addr);
   m_formatter.oneByteOp64(OP_MOV_EAXOv);
   m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
 }

 // [addr] = rax, moffs form (64-bit address immediate); delegates to
 // the ModRM form when the address fits.
 void movq_EAXm(const void* addr) {
   if (IsAddressImmediate(addr)) {
     movq_rm(rax, addr);
     return;
   }

   spew("movq       %%rax, %p", addr);
   m_formatter.oneByteOp64(OP_MOV_OvEAX);
   m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
 }

 // dst = [base + offset].
 void movq_mr(int32_t offset, RegisterID base, RegisterID dst) {
   spew("movq       " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, dst);
 }

 // dst = [base + offset], forcing a 32-bit displacement (patchable).
 void movq_mr_disp32(int32_t offset, RegisterID base, RegisterID dst) {
   spew("movq       " MEM_o32b ", %s", ADDR_o32b(offset, base),
        GPReg64Name(dst));
   m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, offset, base, dst);
 }

 // dst = [base + index * scale + offset].
 void movq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
              RegisterID dst) {
   spew("movq       " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
        GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_MOV_GvEv, offset, base, index, scale, dst);
 }

 // dst = [addr] (absolute address); uses the shorter rax/moffs form via
 // movq_mEAX when dst is rax and the address doesn't fit an immediate.
 void movq_mr(const void* addr, RegisterID dst) {
   if (dst == rax && !IsAddressImmediate(addr)) {
     movq_mEAX(addr);
     return;
   }

   spew("movq       %p, %s", addr, GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_MOV_GvEv, addr, dst);
 }
    716 
 // dst = base + index * scale + offset (address computation, no load).
 void leaq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
              RegisterID dst) {
   spew("leaq       " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
        GPReg64Name(dst));
   m_formatter.oneByteOp64(OP_LEA, offset, base, index, scale, dst);
 }

 // dst = index * scale + offset (no base register). The base-less SIB
 // form is emitted through the disp32 path.
 void leaq_mr(int32_t offset, RegisterID index, int scale, RegisterID dst) {
   spew("leaq       " MEM_os ", %s", ADDR_os(offset, index, scale),
        GPReg64Name(dst));
   m_formatter.oneByteOp64_disp32(OP_LEA, offset, index, scale, dst);
 }

 // [base + offset] = imm (32-bit immediate, group 11 MOV).
 void movq_i32m(int32_t imm, int32_t offset, RegisterID base) {
   spew("movq       $%d, " MEM_ob, imm, ADDR_ob(offset, base));
   m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, GROUP11_MOV);
   m_formatter.immediate32(imm);
 }
 // [base + index * scale + offset] = imm.
 void movq_i32m(int32_t imm, int32_t offset, RegisterID base, RegisterID index,
                int scale) {
   spew("movq       $%d, " MEM_obs, imm, ADDR_obs(offset, base, index, scale));
   m_formatter.oneByteOp64(OP_GROUP11_EvIz, offset, base, index, scale,
                           GROUP11_MOV);
   m_formatter.immediate32(imm);
 }
 // [addr] = imm (absolute address).
 void movq_i32m(int32_t imm, const void* addr) {
   spew("movq       $%d, %p", imm, addr);
   m_formatter.oneByteOp64(OP_GROUP11_EvIz, addr, GROUP11_MOV);
   m_formatter.immediate32(imm);
 }
    747 
  // Note that this instruction sign-extends its 32-bit immediate field to 64
  // bits and loads the 64-bit value into a 64-bit register.
  //
  // Note also that this is similar to the movl_i32r instruction, except that
  // movl_i32r *zero*-extends its 32-bit immediate, and it has smaller code
  // size, so it's preferred for values which could use either.
  void movq_i32r(int32_t imm, RegisterID dst) {
    spew("movq       $%d, %s", imm, GPRegName(dst));
    m_formatter.oneByteOp64(OP_GROUP11_EvIz, dst, GROUP11_MOV);
    m_formatter.immediate32(imm);
  }
    759 
  // Load a full 64-bit immediate into dst (movabsq): REX.W + B8+rd
  // followed by an imm64.
  void movq_i64r(int64_t imm, RegisterID dst) {
    spew("movabsq    $0x%" PRIx64 ", %s", uint64_t(imm), GPReg64Name(dst));
    m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
    m_formatter.immediate64(imm);
  }
    765 
  // movsbq: sign-extend a byte (register or memory) into a 64-bit register.
  void movsbq_rr(RegisterID src, RegisterID dst) {
    spew("movsbq     %s, %s", GPReg32Name(src), GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEb, src, dst);
  }
  void movsbq_mr(int32_t offset, RegisterID base, RegisterID dst) {
    spew("movsbq     " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, dst);
  }
  void movsbq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
                 RegisterID dst) {
    spew("movsbq     " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
         GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEb, offset, base, index, scale, dst);
  }
    780 
  // movswq: sign-extend a word (register or memory) into a 64-bit register.
  void movswq_rr(RegisterID src, RegisterID dst) {
    spew("movswq     %s, %s", GPReg32Name(src), GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEw, src, dst);
  }
  void movswq_mr(int32_t offset, RegisterID base, RegisterID dst) {
    spew("movswq     " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, dst);
  }
  void movswq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
                 RegisterID dst) {
    spew("movswq     " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
         GPReg64Name(dst));
    m_formatter.twoByteOp64(OP2_MOVSX_GvEw, offset, base, index, scale, dst);
  }
    795 
  // movslq (movsxd): sign-extend a dword (register or memory) into a
  // 64-bit register.
  void movslq_rr(RegisterID src, RegisterID dst) {
    spew("movslq     %s, %s", GPReg32Name(src), GPReg64Name(dst));
    m_formatter.oneByteOp64(OP_MOVSXD_GvEv, src, dst);
  }
  void movslq_mr(int32_t offset, RegisterID base, RegisterID dst) {
    spew("movslq     " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
    m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, dst);
  }
  void movslq_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
                 RegisterID dst) {
    spew("movslq     " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
         GPReg64Name(dst));
    m_formatter.oneByteOp64(OP_MOVSXD_GvEv, offset, base, index, scale, dst);
  }
    810 
  // 32-bit rip-relative load/store. The JmpSrc is taken after the
  // instruction is emitted, so its offset marks the end of the disp32 field
  // for later patching.
  [[nodiscard]] JmpSrc movl_ripr(RegisterID dst) {
    m_formatter.oneByteRipOp(OP_MOV_GvEv, 0, (RegisterID)dst);
    JmpSrc label(m_formatter.size());
    spew("movl       " MEM_o32r ", %s", ADDR_o32r(label.offset()),
         GPReg32Name(dst));
    return label;
  }

  // Store form of the above: movl src -> [rip + disp32].
  [[nodiscard]] JmpSrc movl_rrip(RegisterID src) {
    m_formatter.oneByteRipOp(OP_MOV_EvGv, 0, (RegisterID)src);
    JmpSrc label(m_formatter.size());
    spew("movl       %s, " MEM_o32r "", GPReg32Name(src),
         ADDR_o32r(label.offset()));
    return label;
  }
    826 
  // 64-bit rip-relative load; returns a JmpSrc locating the disp32 field.
  [[nodiscard]] JmpSrc movq_ripr(RegisterID dst) {
    m_formatter.oneByteRipOp64(OP_MOV_GvEv, 0, dst);
    JmpSrc label(m_formatter.size());
    spew("movq       " MEM_o32r ", %s", ADDR_o32r(label.offset()),
         GPRegName(dst));
    return label;
  }

  // Store form: movq src -> [rip + disp32].
  [[nodiscard]] JmpSrc movq_rrip(RegisterID src) {
    m_formatter.oneByteRipOp64(OP_MOV_EvGv, 0, (RegisterID)src);
    JmpSrc label(m_formatter.size());
    spew("movq       %s, " MEM_o32r "", GPRegName(src),
         ADDR_o32r(label.offset()));
    return label;
  }
    842 
  // leaq [base + offset] -> dst.
  void leaq_mr(int32_t offset, RegisterID base, RegisterID dst) {
    spew("leaq       " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
    m_formatter.oneByteOp64(OP_LEA, offset, base, dst);
  }

  // leaq [rip + disp32] -> dst; returns a JmpSrc locating the disp32 field.
  [[nodiscard]] JmpSrc leaq_rip(RegisterID dst) {
    m_formatter.oneByteRipOp64(OP_LEA, 0, dst);
    JmpSrc label(m_formatter.size());
    spew("leaq       " MEM_o32r ", %s", ADDR_o32r(label.offset()),
         GPRegName(dst));
    return label;
  }
    855 
    856  // Flow control:
    857 
  // Indirect jump through a rip-relative memory operand:
  // jmp *ripOffset(%rip).
  void jmp_rip(int ripOffset) {
    // rip-relative addressing.
    spew("jmp        *%d(%%rip)", ripOffset);
    m_formatter.oneByteRipOp(OP_GROUP5_Ev, ripOffset, GROUP5_OP_JMPN);
  }
    863 
  // Emit a raw 8-byte literal into the instruction stream (.quad data).
  void immediate64(int64_t imm) {
    spew(".quad      %lld", (long long)imm);
    m_formatter.immediate64(imm);
  }
    868 
    869  // SSE operations:
    870 
  // Convert a 64-bit GPR to scalar double/float (REX.W/VEX.W form of
  // cvtsi2sd/cvtsi2ss).
  void vcvtsq2sd_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
    twoByteOpInt64Simd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, src1, src0,
                       dst);
  }
  void vcvtsq2ss_rr(RegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
    twoByteOpInt64Simd("vcvtsi2ss", VEX_SS, OP2_CVTSI2SD_VsdEd, src1, src0,
                       dst);
  }
    879 
  // Two-operand form: convert a 64-bit GPR to scalar double with no
  // explicit vvvv source (invalid_xmm selects the legacy/implicit form).
  void vcvtsi2sdq_rr(RegisterID src, XMMRegisterID dst) {
    twoByteOpInt64Simd("vcvtsi2sdq", VEX_SD, OP2_CVTSI2SD_VsdEd, src,
                       invalid_xmm, dst);
  }
    884 
  // Truncating scalar-to-integer conversions into a 64-bit GPR.
  void vcvttsd2sq_rr(XMMRegisterID src, RegisterID dst) {
    twoByteOpSimdInt64("vcvttsd2si", VEX_SD, OP2_CVTTSD2SI_GdWsd, src, dst);
  }

  void vcvttss2sq_rr(XMMRegisterID src, RegisterID dst) {
    twoByteOpSimdInt64("vcvttss2si", VEX_SS, OP2_CVTTSD2SI_GdWsd, src, dst);
  }
    892 
  // Move the low 64 bits of an XMM register into a GPR.
  void vmovq_rr(XMMRegisterID src, RegisterID dst) {
    // While this is called "vmovq", it actually uses the vmovd encoding
    // with a REX prefix modifying it to be 64-bit.
    twoByteOpSimdInt64("vmovq", VEX_PD, OP2_MOVD_EdVd, (XMMRegisterID)dst,
                       (RegisterID)src);
  }
    899 
    900  void vpextrq_irr(unsigned lane, XMMRegisterID src, RegisterID dst) {
    901    MOZ_ASSERT(lane < 2);
    902    threeByteOpImmSimdInt64("vpextrq", VEX_PD, OP3_PEXTRQ_EvVdqIb, ESCAPE_3A,
    903                            lane, src, dst);
    904  }
    905 
    906  void vpinsrq_irr(unsigned lane, RegisterID src1, XMMRegisterID src0,
    907                   XMMRegisterID dst) {
    908    MOZ_ASSERT(lane < 2);
    909    threeByteOpImmInt64Simd("vpinsrq", VEX_PD, OP3_PINSRQ_VdqEvIb, ESCAPE_3A,
    910                            lane, src1, src0, dst);
    911  }
    912 
  // Move a GPR into the low 64 bits of an XMM register.
  void vmovq_rr(RegisterID src, XMMRegisterID dst) {
    // While this is called "vmovq", it actually uses the vmovd encoding
    // with a REX prefix modifying it to be 64-bit.
    twoByteOpInt64Simd("vmovq", VEX_PD, OP2_MOVD_VdEd, src, invalid_xmm, dst);
  }
    918 
  // rip-relative SIMD loads; each returns a JmpSrc locating the disp32
  // field for later patching.
  [[nodiscard]] JmpSrc vmovsd_ripr(XMMRegisterID dst) {
    return twoByteRipOpSimd("vmovsd", VEX_SD, OP2_MOVSD_VsdWsd, dst);
  }
  [[nodiscard]] JmpSrc vmovss_ripr(XMMRegisterID dst) {
    return twoByteRipOpSimd("vmovss", VEX_SS, OP2_MOVSD_VsdWsd, dst);
  }
  [[nodiscard]] JmpSrc vmovaps_ripr(XMMRegisterID dst) {
    return twoByteRipOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, dst);
  }
  [[nodiscard]] JmpSrc vmovdqa_ripr(XMMRegisterID dst) {
    return twoByteRipOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, dst);
  }
    931 
  // Packed-integer add/subtract/multiply with a rip-relative memory
  // operand. Each returns a JmpSrc locating the disp32 field.
  [[nodiscard]] JmpSrc vpaddb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddb", VEX_PD, OP2_PADDB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddw", VEX_PD, OP2_PADDW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddd", VEX_PD, OP2_PADDD_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddq_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddq", VEX_PD, OP2_PADDQ_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubb", VEX_PD, OP2_PSUBB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubw", VEX_PD, OP2_PSUBW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubd", VEX_PD, OP2_PSUBD_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubq_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubq", VEX_PD, OP2_PSUBQ_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpmullw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpmullw", VEX_PD, OP2_PMULLW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpmulld_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmulld", VEX_PD, OP3_PMULLD_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  // Saturating packed add/subtract with a rip-relative memory operand.
  [[nodiscard]] JmpSrc vpaddsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddsb", VEX_PD, OP2_PADDSB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddusb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddusb", VEX_PD, OP2_PADDUSB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddsw", VEX_PD, OP2_PADDSW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpaddusw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpaddusw", VEX_PD, OP2_PADDUSW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubsb", VEX_PD, OP2_PSUBSB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubusb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubusb", VEX_PD, OP2_PSUBUSB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubsw", VEX_PD, OP2_PSUBSW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpsubusw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpsubusw", VEX_PD, OP2_PSUBUSW_VdqWdq, src, dst);
  }
  // Packed min/max with a rip-relative memory operand. The SSE4.1-era
  // variants use three-byte (0F 38) opcodes; the MMX/SSE2-era ones use
  // two-byte opcodes.
  [[nodiscard]] JmpSrc vpminsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpminsb", VEX_PD, OP3_PMINSB_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpminub_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpminub", VEX_PD, OP2_PMINUB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpminsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpminsw", VEX_PD, OP2_PMINSW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpminuw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpminuw", VEX_PD, OP3_PMINUW_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpminsd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpminsd", VEX_PD, OP3_PMINSD_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpminud_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpminud", VEX_PD, OP3_PMINUD_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxsb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmaxsb", VEX_PD, OP3_PMAXSB_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxub_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpmaxub", VEX_PD, OP2_PMAXUB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpmaxsw", VEX_PD, OP2_PMAXSW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxuw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmaxuw", VEX_PD, OP3_PMAXUW_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxsd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmaxsd", VEX_PD, OP3_PMAXSD_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpmaxud_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmaxud", VEX_PD, OP3_PMAXUD_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  // Packed bitwise logic with a rip-relative memory operand.
  [[nodiscard]] JmpSrc vpand_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpand", VEX_PD, OP2_PANDDQ_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpxor_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpxor", VEX_PD, OP2_PXORDQ_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpor_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpor", VEX_PD, OP2_PORDQ_VdqWdq, src, dst);
  }
  // Packed floating-point arithmetic/logic with a rip-relative memory
  // operand.
  [[nodiscard]] JmpSrc vaddps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vaddps", VEX_PS, OP2_ADDPS_VpsWps, src, dst);
  }
  [[nodiscard]] JmpSrc vaddpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vaddpd", VEX_PD, OP2_ADDPD_VpdWpd, src, dst);
  }
  [[nodiscard]] JmpSrc vsubps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vsubps", VEX_PS, OP2_SUBPS_VpsWps, src, dst);
  }
  [[nodiscard]] JmpSrc vsubpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vsubpd", VEX_PD, OP2_SUBPD_VpdWpd, src, dst);
  }
  [[nodiscard]] JmpSrc vdivps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vdivps", VEX_PS, OP2_DIVPS_VpsWps, src, dst);
  }
  [[nodiscard]] JmpSrc vdivpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vdivpd", VEX_PD, OP2_DIVPD_VpdWpd, src, dst);
  }
  [[nodiscard]] JmpSrc vmulps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vmulps", VEX_PS, OP2_MULPS_VpsWps, src, dst);
  }
  [[nodiscard]] JmpSrc vmulpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vmulpd", VEX_PD, OP2_MULPD_VpdWpd, src, dst);
  }
  [[nodiscard]] JmpSrc vandps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vandps", VEX_PS, OP2_ANDPS_VpsWps, src, dst);
  }
  [[nodiscard]] JmpSrc vandpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vandpd", VEX_PD, OP2_ANDPD_VpdWpd, src, dst);
  }
  [[nodiscard]] JmpSrc vxorps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vxorps", VEX_PS, OP2_XORPS_VpsWps, src, dst);
  }
  [[nodiscard]] JmpSrc vxorpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vxorpd", VEX_PD, OP2_XORPD_VpdWpd, src, dst);
  }
  [[nodiscard]] JmpSrc vminpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vminpd", VEX_PD, OP2_MINPD_VpdWpd, src, dst);
  }
  // Pack/unpack operations with a rip-relative memory operand.
  [[nodiscard]] JmpSrc vpacksswb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpacksswb", VEX_PD, OP2_PACKSSWB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpackuswb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpackuswb", VEX_PD, OP2_PACKUSWB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpackssdw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpackssdw", VEX_PD, OP2_PACKSSDW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpackusdw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpackusdw", VEX_PD, OP3_PACKUSDW_VdqWdq,
                              ESCAPE_38, src, dst);
  }
  [[nodiscard]] JmpSrc vpunpckldq_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpunpckldq", VEX_PD, OP2_PUNPCKLDQ_VdqWdq, src,
                            dst);
  }
  [[nodiscard]] JmpSrc vunpcklps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vunpcklps", VEX_PS, OP2_UNPCKLPS_VsdWsd, src, dst);
  }
  // Test/shuffle/multiply-add helpers with a rip-relative memory operand.
  [[nodiscard]] JmpSrc vptest_ripr(XMMRegisterID lhs) {
    return threeByteRipOpSimd("vptest", VEX_PD, OP3_PTEST_VdVd, ESCAPE_38, lhs);
  }
  [[nodiscard]] JmpSrc vpshufb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpshufb", VEX_PD, OP3_PSHUFB_VdqWdq, ESCAPE_38,
                              src, dst);
  }
  [[nodiscard]] JmpSrc vpmaddwd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpmaddwd", VEX_PD, OP2_PMADDWD_VdqWdq, src, dst);
  }
  // Packed-integer compares with a rip-relative memory operand.
  [[nodiscard]] JmpSrc vpcmpeqb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpcmpeqb", VEX_PD, OP2_PCMPEQB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpcmpgtb_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpcmpgtb", VEX_PD, OP2_PCMPGTB_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpcmpeqw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpcmpeqw", VEX_PD, OP2_PCMPEQW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpcmpgtw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpcmpgtw", VEX_PD, OP2_PCMPGTW_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpcmpeqd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpcmpeqd", VEX_PD, OP2_PCMPEQD_VdqWdq, src, dst);
  }
  [[nodiscard]] JmpSrc vpcmpgtd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpcmpgtd", VEX_PD, OP2_PCMPGTD_VdqWdq, src, dst);
  }
  // Packed floating-point compares with a rip-relative memory operand; the
  // comparison predicate is carried in the trailing imm8.
  [[nodiscard]] JmpSrc vcmpeqps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps,
                               X86Encoding::ConditionCmp_EQ, src, dst);
  }
  [[nodiscard]] JmpSrc vcmpneqps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps,
                               X86Encoding::ConditionCmp_NEQ, src, dst);
  }
  [[nodiscard]] JmpSrc vcmpltps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps,
                               X86Encoding::ConditionCmp_LT, src, dst);
  }
  [[nodiscard]] JmpSrc vcmpleps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps,
                               X86Encoding::ConditionCmp_LE, src, dst);
  }
  [[nodiscard]] JmpSrc vcmpgeps_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpImmSimd("vcmpps", VEX_PS, OP2_CMPPS_VpsWps,
                               X86Encoding::ConditionCmp_GE, src, dst);
  }
  [[nodiscard]] JmpSrc vcmpeqpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd,
                               X86Encoding::ConditionCmp_EQ, src, dst);
  }
  [[nodiscard]] JmpSrc vcmpneqpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd,
                               X86Encoding::ConditionCmp_NEQ, src, dst);
  }
  [[nodiscard]] JmpSrc vcmpltpd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd,
                               X86Encoding::ConditionCmp_LT, src, dst);
  }
  [[nodiscard]] JmpSrc vcmplepd_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpImmSimd("vcmppd", VEX_PD, OP2_CMPPD_VpdWpd,
                               X86Encoding::ConditionCmp_LE, src, dst);
  }
  // Remaining multiply helpers with a rip-relative memory operand.
  [[nodiscard]] JmpSrc vpmaddubsw_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return threeByteRipOpSimd("vpmaddubsw", VEX_PD, OP3_PMADDUBSW_VdqWdq,
                              ESCAPE_38, src, dst);
  }
  [[nodiscard]] JmpSrc vpmuludq_ripr(XMMRegisterID src, XMMRegisterID dst) {
    return twoByteRipOpSimd("vpmuludq", VEX_PD, OP2_PMULUDQ_VdqWdq, src, dst);
  }
   1170 
   1171  // BMI instructions:
   1172 
   1173  void sarxq_rrr(RegisterID src, RegisterID shift, RegisterID dst) {
   1174    spew("sarxq      %s, %s, %s", GPReg64Name(src), GPReg64Name(shift),
   1175         GPReg64Name(dst));
   1176 
   1177    RegisterID rm = src;
   1178    XMMRegisterID src0 = static_cast<XMMRegisterID>(shift);
   1179    int reg = dst;
   1180    m_formatter.threeByteOpVex64(VEX_SS /* = F3 */, OP3_SARX_GyEyBy, ESCAPE_38,
   1181                                 rm, src0, reg);
   1182  }
   1183 
   1184  void shlxq_rrr(RegisterID src, RegisterID shift, RegisterID dst) {
   1185    spew("shlxq      %s, %s, %s", GPReg64Name(src), GPReg64Name(shift),
   1186         GPReg64Name(dst));
   1187 
   1188    RegisterID rm = src;
   1189    XMMRegisterID src0 = static_cast<XMMRegisterID>(shift);
   1190    int reg = dst;
   1191    m_formatter.threeByteOpVex64(VEX_PD /* = 66 */, OP3_SHLX_GyEyBy, ESCAPE_38,
   1192                                 rm, src0, reg);
   1193  }
   1194 
   1195  void shrxq_rrr(RegisterID src, RegisterID shift, RegisterID dst) {
   1196    spew("shrxq      %s, %s, %s", GPReg64Name(src), GPReg64Name(shift),
   1197         GPReg64Name(dst));
   1198 
   1199    RegisterID rm = src;
   1200    XMMRegisterID src0 = static_cast<XMMRegisterID>(shift);
   1201    int reg = dst;
   1202    m_formatter.threeByteOpVex64(VEX_SD /* = F2 */, OP3_SHRX_GyEyBy, ESCAPE_38,
   1203                                 rm, src0, reg);
   1204  }
   1205 
   1206  void andnq_rrr(RegisterID src1, RegisterID src2, RegisterID dst) {
   1207    spew("andnq      %s, %s, %s", GPReg64Name(src1), GPReg64Name(src2),
   1208         GPReg64Name(dst));
   1209 
   1210    RegisterID rm = src2;
   1211    XMMRegisterID src0 = static_cast<XMMRegisterID>(src1);
   1212    int reg = dst;
   1213    m_formatter.threeByteOpVex64(VEX_PS, OP3_ANDN_GyByEy, ESCAPE_38, rm, src0,
   1214                                 reg);
   1215  }
   1216 
   1217 private:
  // Emit a two-byte SIMD op with a rip-relative memory operand and a single
  // register operand, always using the legacy-SSE prefix form. The JmpSrc is
  // taken after emission, so its offset marks the end of the disp32 field
  // for later patching.
  [[nodiscard]] JmpSrc twoByteRipOpSimd(const char* name, VexOperandType ty,
                                        TwoByteOpcodeID opcode,
                                        XMMRegisterID reg) {
    MOZ_ASSERT(!IsXMMReversedOperands(opcode));
    m_formatter.legacySSEPrefix(ty);
    m_formatter.twoByteRipOp(opcode, 0, reg);
    JmpSrc label(m_formatter.size());
    spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name),
         ADDR_o32r(label.offset()), XMMRegName(reg));
    return label;
  }
   1229 
   1230  [[nodiscard]] JmpSrc twoByteRipOpSimd(const char* name, VexOperandType ty,
   1231                                        TwoByteOpcodeID opcode,
   1232                                        XMMRegisterID src0, XMMRegisterID dst) {
   1233    MOZ_ASSERT(src0 != invalid_xmm && !IsXMMReversedOperands(opcode));
   1234    if (useLegacySSEEncoding(src0, dst)) {
   1235      m_formatter.legacySSEPrefix(ty);
   1236      m_formatter.twoByteRipOp(opcode, 0, dst);
   1237      JmpSrc label(m_formatter.size());
   1238      spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name),
   1239           ADDR_o32r(label.offset()), XMMRegName(dst));
   1240      return label;
   1241    }
   1242 
   1243    m_formatter.twoByteRipOpVex(ty, opcode, 0, src0, dst);
   1244    JmpSrc label(m_formatter.size());
   1245    spew("%-11s, " MEM_o32r ", %s, %s", name, ADDR_o32r(label.offset()),
   1246         XMMRegName(src0), XMMRegName(dst));
   1247    return label;
   1248  }
   1249 
  // Like twoByteRipOpSimd, but with a trailing imm8 (e.g. the cmpps
  // predicate). Because the imm8 is emitted after the disp32 patch field,
  // the JmpSrc is constructed with one trailing byte so patching still
  // targets the displacement.
  [[nodiscard]] JmpSrc twoByteRipOpImmSimd(const char* name, VexOperandType ty,
                                           TwoByteOpcodeID opcode, uint32_t imm,
                                           XMMRegisterID src0,
                                           XMMRegisterID dst) {
    MOZ_ASSERT(src0 != invalid_xmm && !IsXMMReversedOperands(opcode));
    if (useLegacySSEEncoding(src0, dst)) {
      m_formatter.legacySSEPrefix(ty);
      m_formatter.twoByteRipOp(opcode, 0, dst);
      m_formatter.immediate8u(imm);
      JmpSrc label(m_formatter.size(),
                   /* bytes trailing the patch field = */ 1);
      spew("%-11s$0x%x, " MEM_o32r ", %s", legacySSEOpName(name), imm,
           ADDR_o32r(label.offset()), XMMRegName(dst));
      return label;
    }

    m_formatter.twoByteRipOpVex(ty, opcode, 0, src0, dst);
    m_formatter.immediate8u(imm);
    JmpSrc label(m_formatter.size(),
                 /* bytes trailing the patch field = */ 1);
    spew("%-11s$0x%x, " MEM_o32r ", %s, %s", name, imm,
         ADDR_o32r(label.offset()), XMMRegName(src0), XMMRegName(dst));
    return label;
  }
   1274 
  // Emit a two-byte op moving between a 64-bit GPR (rm) and an XMM register
  // (dst), optionally with a VEX extra source src0. Picks legacy-SSE or VEX
  // encoding, and adjusts the spew operand order for "reversed" opcodes
  // (where dst is the memory-side operand).
  void twoByteOpInt64Simd(const char* name, VexOperandType ty,
                          TwoByteOpcodeID opcode, RegisterID rm,
                          XMMRegisterID src0, XMMRegisterID dst) {
    if (useLegacySSEEncoding(src0, dst)) {
      if (IsXMMReversedOperands(opcode)) {
        spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(dst),
             GPRegName(rm));
      } else {
        spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(rm),
             XMMRegName(dst));
      }
      m_formatter.legacySSEPrefix(ty);
      m_formatter.twoByteOp64(opcode, rm, dst);
      return;
    }

    // VEX path: a two-operand spew when there is no extra source, otherwise
    // the full three-operand form.
    if (src0 == invalid_xmm) {
      if (IsXMMReversedOperands(opcode)) {
        spew("%-11s%s, %s", name, XMMRegName(dst), GPRegName(rm));
      } else {
        spew("%-11s%s, %s", name, GPRegName(rm), XMMRegName(dst));
      }
    } else {
      spew("%-11s%s, %s, %s", name, GPRegName(rm), XMMRegName(src0),
           XMMRegName(dst));
    }
    m_formatter.twoByteOpVex64(ty, opcode, rm, src0, dst);
  }
   1303 
  // Emit a two-byte op moving between an XMM register (rm) and a 64-bit GPR
  // (dst). The OP2_MOVD_EdVd case swaps the spewed operands because callers
  // (vmovq_rr) pass the operands pre-swapped to match that opcode's
  // ModRM roles.
  void twoByteOpSimdInt64(const char* name, VexOperandType ty,
                          TwoByteOpcodeID opcode, XMMRegisterID rm,
                          RegisterID dst) {
    if (useLegacySSEEncodingAlways()) {
      if (IsXMMReversedOperands(opcode)) {
        spew("%-11s%s, %s", legacySSEOpName(name), GPRegName(dst),
             XMMRegName(rm));
      } else if (opcode == OP2_MOVD_EdVd) {
        spew("%-11s%s, %s", legacySSEOpName(name),
             XMMRegName((XMMRegisterID)dst), GPRegName((RegisterID)rm));
      } else {
        spew("%-11s%s, %s", legacySSEOpName(name), XMMRegName(rm),
             GPRegName(dst));
      }
      m_formatter.legacySSEPrefix(ty);
      m_formatter.twoByteOp64(opcode, (RegisterID)rm, dst);
      return;
    }

    if (IsXMMReversedOperands(opcode)) {
      spew("%-11s%s, %s", name, GPRegName(dst), XMMRegName(rm));
    } else if (opcode == OP2_MOVD_EdVd) {
      spew("%-11s%s, %s", name, XMMRegName((XMMRegisterID)dst),
           GPRegName((RegisterID)rm));
    } else {
      spew("%-11s%s, %s", name, XMMRegName(rm), GPRegName(dst));
    }
    m_formatter.twoByteOpVex64(ty, opcode, (RegisterID)rm, invalid_xmm,
                               (XMMRegisterID)dst);
  }
   1334 
  // Emit a three-byte (escaped) SIMD op with a rip-relative memory operand
  // and a single register operand, always in legacy-SSE form. Returns a
  // JmpSrc locating the disp32 field.
  [[nodiscard]] JmpSrc threeByteRipOpSimd(const char* name, VexOperandType ty,
                                          ThreeByteOpcodeID opcode,
                                          ThreeByteEscape escape,
                                          XMMRegisterID dst) {
    m_formatter.legacySSEPrefix(ty);
    m_formatter.threeByteRipOp(opcode, escape, 0, dst);
    JmpSrc label(m_formatter.size());
    spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name),
         ADDR_o32r(label.offset()), XMMRegName(dst));
    return label;
  }
   1346 
  // Emit a three-byte (escaped) SIMD op with a rip-relative memory operand,
  // an extra source src0 (required), and destination dst. Chooses legacy-SSE
  // or VEX encoding; returns a JmpSrc locating the disp32 field.
  [[nodiscard]] JmpSrc threeByteRipOpSimd(const char* name, VexOperandType ty,
                                          ThreeByteOpcodeID opcode,
                                          ThreeByteEscape escape,
                                          XMMRegisterID src0,
                                          XMMRegisterID dst) {
    MOZ_ASSERT(src0 != invalid_xmm);
    if (useLegacySSEEncoding(src0, dst)) {
      m_formatter.legacySSEPrefix(ty);
      m_formatter.threeByteRipOp(opcode, escape, 0, dst);
      JmpSrc label(m_formatter.size());
      spew("%-11s" MEM_o32r ", %s", legacySSEOpName(name),
           ADDR_o32r(label.offset()), XMMRegName(dst));
      return label;
    }

    m_formatter.threeByteRipOpVex(ty, opcode, escape, 0, src0, dst);
    JmpSrc label(m_formatter.size());
    spew("%-11s" MEM_o32r ", %s, %s", name, ADDR_o32r(label.offset()),
         XMMRegName(src0), XMMRegName(dst));
    return label;
  }
   1368 
  // Emit a three-byte op with a trailing imm8 moving from XMM src to 64-bit
  // GPR dst (used by vpextrq). Always emits the legacy-SSE form; dst goes in
  // the ModRM r/m field, src in the reg field.
  // NOTE(review): the spew prints dst before src ("$imm, %r64, %xmm"); for
  // AT&T-style output one would expect "$imm, %xmm, %r64" — confirm this is
  // the intended spew order.
  void threeByteOpImmSimdInt64(const char* name, VexOperandType ty,
                               ThreeByteOpcodeID opcode, ThreeByteEscape escape,
                               uint32_t imm, XMMRegisterID src,
                               RegisterID dst) {
    spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, GPReg64Name(dst),
         XMMRegName(src));
    m_formatter.legacySSEPrefix(ty);
    m_formatter.threeByteOp64(opcode, escape, dst, (RegisterID)src);
    m_formatter.immediate8u(imm);
  }
   1379 
  // Emit a three-byte op with a trailing imm8 moving a 64-bit GPR (src1)
  // into XMM dst, with extra VEX source src0 (used by vpinsrq). Chooses
  // legacy-SSE or VEX encoding; src1 goes in the ModRM r/m field.
  void threeByteOpImmInt64Simd(const char* name, VexOperandType ty,
                               ThreeByteOpcodeID opcode, ThreeByteEscape escape,
                               uint32_t imm, RegisterID src1,
                               XMMRegisterID src0, XMMRegisterID dst) {
    if (useLegacySSEEncoding(src0, dst)) {
      spew("%-11s$0x%x, %s, %s", legacySSEOpName(name), imm, GPReg64Name(src1),
           XMMRegName(dst));
      m_formatter.legacySSEPrefix(ty);
      m_formatter.threeByteOp64(opcode, escape, src1, (RegisterID)dst);
      m_formatter.immediate8u(imm);
      return;
    }

    MOZ_ASSERT(src0 != invalid_xmm);
    spew("%-11s$0x%x, %s, %s, %s", name, imm, GPReg64Name(src1),
         XMMRegName(src0), XMMRegName(dst));
    m_formatter.threeByteOpVex64(ty, opcode, escape, src1, src0,
                                 (RegisterID)dst);
    m_formatter.immediate8u(imm);
  }
   1400 };
   1401 
// Name under which shared (x86/x64-common) code refers to the
// platform-specific assembler.
using BaseAssemblerSpecific = BaseAssemblerX64;
   1403 
   1404 }  // namespace X86Encoding
   1405 
   1406 }  // namespace jit
   1407 }  // namespace js
   1408 
   1409 #endif /* jit_x64_BaseAssembler_x64_h */