tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

extension-riscv-v.h (17729B)


      1 // Copyright 2022 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #ifndef jit_riscv64_extension_Extension_riscv_v_h_
      6 #define jit_riscv64_extension_Extension_riscv_v_h_
      7 #ifdef CAN_USE_RVV
      8 #  include "jit/riscv64/Architecture-riscv64.h"
      9 #  include "jit/riscv64/constant/Constant-riscv64.h"
     10 #  include "jit/riscv64/extension/base-assembler-riscv.h"
     11 
     12 namespace js {
     13 namespace jit {
     14 
     15 class AssemblerRISCVV : public AssemblerRiscvBase {
         // Declares emitters for the RISC-V "V" (vector) extension:
         // loads/stores (unit-stride, strided, indexed, segment), vector
         // arithmetic/compare/FP ops, and the vsetvl* configuration helpers.
         // Only declarations live here; encodings are emitted through the
         // GenInstrV overloads declared at the bottom.
     16 public:
     17  // RVV
         // Packs the vtype immediate (zimm) for vsetvli/vsetivli:
         // bit 7 = mask-agnostic (vma), bit 6 = tail-agnostic (vta),
         // bits 5:3 = SEW, bits 2:0 = LMUL.
     18  static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
     19                         MaskAgnosticType mask = mu) {
     20    return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7);
     21  }
     22 
         // Vector loads: unit-stride (vl), strided (vls), indexed (vlx).
     23  void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
     24          MaskType mask = NoMask);
     25  void vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
     26           MaskType mask = NoMask);
     27  void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
     28           MaskType mask = NoMask);
     29 
         // Vector stores: unit-stride (vs), strided (vss), indexed (vsx),
         // and vsu.
     30  void vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
     31          MaskType mask = NoMask);
     32  void vss(VRegister vd, Register rs1, Register rs2, VSew vsew,
     33           MaskType mask = NoMask);
     34  void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
     35           MaskType mask = NoMask);
     36 
     37  void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
     38           MaskType mask = NoMask);
     39 
         // Declares the seg2..seg8 segment-load/store variants of OP; ARG is
         // redefined below for each operand shape before each instantiation.
     40 #  define SegInstr(OP)  \
     41    void OP##seg2(ARG); \
     42    void OP##seg3(ARG); \
     43    void OP##seg4(ARG); \
     44    void OP##seg5(ARG); \
     45    void OP##seg6(ARG); \
     46    void OP##seg7(ARG); \
     47    void OP##seg8(ARG);
     48 
     49 #  define ARG \
     50    VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask = NoMask
     51 
     52  SegInstr(vl) SegInstr(vs)
     53 #  undef ARG
     54 
     55 #  define ARG \
     56    VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask = NoMask
     57 
     58      SegInstr(vls) SegInstr(vss)
     59 #  undef ARG
     60 
     61 #  define ARG \
     62    VRegister vd, Register rs1, VRegister rs2, VSew vsew, MaskType mask = NoMask
     63 
     64          SegInstr(vsx) SegInstr(vlx)
     65 #  undef ARG
     66 #  undef SegInstr
     67 
     68      // RVV Vector Arithmetic Instruction
     69 
     70      void vmv_vv(VRegister vd, VRegister vs1);
     71  void vmv_vx(VRegister vd, Register rs1);
     72  void vmv_vi(VRegister vd, uint8_t simm5);
     73  void vmv_xs(Register rd, VRegister vs2);
     74  void vmv_sx(VRegister vd, Register rs1);
     75  void vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2);
     76  void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
     77  void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
     78 
     79  void vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
     80                   MaskType mask = NoMask);
     81  void vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
     82                  MaskType mask = NoMask);
     83  void vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
     84                  MaskType mask = NoMask);
     85  void vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
     86                   MaskType mask = NoMask);
     87 
     88  void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
     89  void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
     90  void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
     91 
     92  void vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
     93  void vmadc_vx(VRegister vd, Register rs1, VRegister vs2);
     94  void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
     95 
     96  void vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask = NoMask);
     97  void vfmv_fs(FPURegister fd, VRegister vs2);
     98  void vfmv_sf(VRegister vd, FPURegister fs);
     99 
    100  void vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
    101                 MaskType mask = NoMask);
         // NOTE(review): default here is Mask, unlike the NoMask default used
         // everywhere else in this class — confirm the asymmetry is
         // intentional before relying on the default argument.
    102  void vid_v(VRegister vd, MaskType mask = Mask);
    103 
         // The DEFINE_* macros declare one emitter per (name, form):
         // _vv = vector-vector, _vx = vector-scalar, _vi = vector-immediate,
         // _vf/_wf = FP-scalar, _wv = widened first operand, _vs = reduction.
         // funct6 is unused in these declaration-only macros; it documents
         // the encoding used by the definitions in the .cc file.
    104 #  define DEFINE_OPIVV(name, funct6)                           \
    105    void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
    106                   MaskType mask = NoMask);
    107 
    108 #  define DEFINE_OPIVX(name, funct6)                          \
    109    void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
    110                   MaskType mask = NoMask);
    111 
    112 #  define DEFINE_OPIVI(name, funct6)                         \
    113    void name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
    114                   MaskType mask = NoMask);
    115 
    116 #  define DEFINE_OPMVV(name, funct6)                           \
    117    void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
    118                   MaskType mask = NoMask);
    119 
    120 #  define DEFINE_OPMVX(name, funct6)                          \
    121    void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
    122                   MaskType mask = NoMask);
    123 
    124 #  define DEFINE_OPFVV(name, funct6)                           \
    125    void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
    126                   MaskType mask = NoMask);
    127 
    128 #  define DEFINE_OPFWV(name, funct6)                           \
    129    void name##_wv(VRegister vd, VRegister vs2, VRegister vs1, \
    130                   MaskType mask = NoMask);
    131 
    132 #  define DEFINE_OPFRED(name, funct6)                          \
    133    void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
    134                   MaskType mask = NoMask);
    135 
    136 #  define DEFINE_OPFVF(name, funct6)                             \
    137    void name##_vf(VRegister vd, VRegister vs2, FPURegister fs1, \
    138                   MaskType mask = NoMask);
    139 
    140 #  define DEFINE_OPFWF(name, funct6)                             \
    141    void name##_wf(VRegister vd, VRegister vs2, FPURegister fs1, \
    142                   MaskType mask = NoMask);
    143 
    144 #  define DEFINE_OPFVV_FMA(name, funct6)                       \
    145    void name##_vv(VRegister vd, VRegister vs1, VRegister vs2, \
    146                   MaskType mask = NoMask);
    147 
    148 #  define DEFINE_OPFVF_FMA(name, funct6)                         \
    149    void name##_vf(VRegister vd, FPURegister fs1, VRegister vs2, \
    150                   MaskType mask = NoMask);
    151 
    152 #  define DEFINE_OPMVV_VIE(name) \
    153    void name(VRegister vd, VRegister vs2, MaskType mask = NoMask);
    154 
    155  DEFINE_OPIVV(vadd, VADD_FUNCT6)
    156  DEFINE_OPIVX(vadd, VADD_FUNCT6)
    157  DEFINE_OPIVI(vadd, VADD_FUNCT6)
    158  DEFINE_OPIVV(vsub, VSUB_FUNCT6)
    159  DEFINE_OPIVX(vsub, VSUB_FUNCT6)
    160  DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
    161  DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
    162  DEFINE_OPMVX(vmul, VMUL_FUNCT6)
    163  DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
    164  DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
    165  DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
    166  DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
    167  DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
    168  DEFINE_OPMVV(vmul, VMUL_FUNCT6)
    169  DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
    170  DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
    171  DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
    172  DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
    173  DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
    174  DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
    175  DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
    176  DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
    177  DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
    178  DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
    179  DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
         // Consistency fix: this line previously passed VSADD_FUNCT6 for
         // vsaddu; the funct6 argument is discarded by the declaration
         // macro, so this only aligns it with the two lines below.
    180  DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6)
    181  DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
    182  DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
    183  DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
    184  DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
    185  DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
    186  DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
    187  DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
    188  DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
    189  DEFINE_OPIVV(vminu, VMINU_FUNCT6)
    190  DEFINE_OPIVX(vminu, VMINU_FUNCT6)
    191  DEFINE_OPIVV(vmin, VMIN_FUNCT6)
    192  DEFINE_OPIVX(vmin, VMIN_FUNCT6)
    193  DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
    194  DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
    195  DEFINE_OPIVV(vmax, VMAX_FUNCT6)
    196  DEFINE_OPIVX(vmax, VMAX_FUNCT6)
    197  DEFINE_OPIVV(vand, VAND_FUNCT6)
    198  DEFINE_OPIVX(vand, VAND_FUNCT6)
    199  DEFINE_OPIVI(vand, VAND_FUNCT6)
    200  DEFINE_OPIVV(vor, VOR_FUNCT6)
    201  DEFINE_OPIVX(vor, VOR_FUNCT6)
    202  DEFINE_OPIVI(vor, VOR_FUNCT6)
    203  DEFINE_OPIVV(vxor, VXOR_FUNCT6)
    204  DEFINE_OPIVX(vxor, VXOR_FUNCT6)
    205  DEFINE_OPIVI(vxor, VXOR_FUNCT6)
    206  DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
    207  DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
    208  DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)
    209 
    210  DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
    211  DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
    212  DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
    213  DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
    214 
    215  DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
    216  DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
    217  DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
    218 
    219  DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
    220  DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
    221  DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
    222 
    223  DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
    224  DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
    225 
    226  DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
    227  DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
    228 
    229  DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
    230  DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
    231  DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
    232 
    233  DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
    234  DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
    235  DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
    236 
    237  DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
    238  DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
    239 
    240  DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
    241  DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
    242 
    243  DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
    244  DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
    245  DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
    246 
    247  DEFINE_OPIVV(vsra, VSRA_FUNCT6)
    248  DEFINE_OPIVX(vsra, VSRA_FUNCT6)
    249  DEFINE_OPIVI(vsra, VSRA_FUNCT6)
    250 
    251  DEFINE_OPIVV(vsll, VSLL_FUNCT6)
    252  DEFINE_OPIVX(vsll, VSLL_FUNCT6)
    253  DEFINE_OPIVI(vsll, VSLL_FUNCT6)
    254 
    255  DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
    256  DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)
    257 
    258  DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
    259  DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
    260  DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
    261  DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
    262  DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
    263  DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
    264  DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
    265  DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
    266 
    267  // Vector Widening Floating-Point Add/Subtract Instructions
    268  DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
    269  DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
    270  DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
    271  DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
    272  DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
    273  DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
    274  DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
    275  DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)
    276 
    277  // Vector Widening Floating-Point Reduction Instructions
    278  DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
    279  DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)
    280 
    281  // Vector Widening Floating-Point Multiply
    282  DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
    283  DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)
    284 
    285  DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
    286  DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
    287  DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
    288  DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
    289  DEFINE_OPFVV(vfmax, VMFMAX_FUNCT6)
    290  DEFINE_OPFVV(vfmin, VMFMIN_FUNCT6)
    291  DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)
    292 
         // NOTE(review): "vfsngj*" looks like a transposed spelling of the
         // spec mnemonic vfsgnj* (sign-injection); the name is used
         // consistently in this class (see vfneg_vv/vfabs_vv below), so it
         // is kept as-is to preserve the interface.
    293  DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
    294  DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
    295  DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
    296  DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
    297  DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
    298  DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)
    299 
    300  // Vector Single-Width Floating-Point Fused Multiply-Add Instructions
    301  DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6)
    302  DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6)
    303  DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6)
    304  DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6)
    305  DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6)
    306  DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6)
    307  DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6)
    308  DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6)
    309  DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6)
    310  DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6)
    311  DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6)
    312  DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6)
    313  DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6)
    314  DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
    315  DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
    316  DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)
    317 
    318  // Vector Widening Floating-Point Fused Multiply-Add Instructions
    319  DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
    320  DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
    321  DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
    322  DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
    323  DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
    324  DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
    325  DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
    326  DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
    327 
    328  // Vector Narrowing Fixed-Point Clip Instructions
    329  DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
    330  DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
    331  DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6)
    332  DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
    333  DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
    334  DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)
    335 
    336  // Vector Integer Extension
    337  DEFINE_OPMVV_VIE(vzext_vf8)
    338  DEFINE_OPMVV_VIE(vsext_vf8)
    339  DEFINE_OPMVV_VIE(vzext_vf4)
    340  DEFINE_OPMVV_VIE(vsext_vf4)
    341  DEFINE_OPMVV_VIE(vzext_vf2)
    342  DEFINE_OPMVV_VIE(vsext_vf2)
    343 
    344 #  undef DEFINE_OPIVI
    345 #  undef DEFINE_OPIVV
    346 #  undef DEFINE_OPIVX
    347 #  undef DEFINE_OPMVV
    348 #  undef DEFINE_OPMVX
    349 #  undef DEFINE_OPFVV
    350 #  undef DEFINE_OPFWV
    351 #  undef DEFINE_OPFVF
    352 #  undef DEFINE_OPFWF
    353 #  undef DEFINE_OPFVV_FMA
    354 #  undef DEFINE_OPFVF_FMA
    355 #  undef DEFINE_OPMVV_VIE
    356 #  undef DEFINE_OPFRED
    357 
         // Single-operand FP unary ops; the vs1 field of the OPFVV encoding
         // selects the operation.
    358 #  define DEFINE_VFUNARY(name, funct6, vs1)                          \
    359    void name(VRegister vd, VRegister vs2, MaskType mask = NoMask) { \
    360      GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask);                 \
    361    }
    362 
    363  DEFINE_VFUNARY(vfcvt_xu_f_v, VFUNARY0_FUNCT6, VFCVT_XU_F_V)
    364  DEFINE_VFUNARY(vfcvt_x_f_v, VFUNARY0_FUNCT6, VFCVT_X_F_V)
    365  DEFINE_VFUNARY(vfcvt_f_x_v, VFUNARY0_FUNCT6, VFCVT_F_X_V)
    366  DEFINE_VFUNARY(vfcvt_f_xu_v, VFUNARY0_FUNCT6, VFCVT_F_XU_V)
    367  DEFINE_VFUNARY(vfwcvt_xu_f_v, VFUNARY0_FUNCT6, VFWCVT_XU_F_V)
    368  DEFINE_VFUNARY(vfwcvt_x_f_v, VFUNARY0_FUNCT6, VFWCVT_X_F_V)
    369  DEFINE_VFUNARY(vfwcvt_f_x_v, VFUNARY0_FUNCT6, VFWCVT_F_X_V)
    370  DEFINE_VFUNARY(vfwcvt_f_xu_v, VFUNARY0_FUNCT6, VFWCVT_F_XU_V)
    371  DEFINE_VFUNARY(vfwcvt_f_f_v, VFUNARY0_FUNCT6, VFWCVT_F_F_V)
    372 
    373  DEFINE_VFUNARY(vfncvt_f_f_w, VFUNARY0_FUNCT6, VFNCVT_F_F_W)
    374  DEFINE_VFUNARY(vfncvt_x_f_w, VFUNARY0_FUNCT6, VFNCVT_X_F_W)
    375  DEFINE_VFUNARY(vfncvt_xu_f_w, VFUNARY0_FUNCT6, VFNCVT_XU_F_W)
    376 
    377  DEFINE_VFUNARY(vfclass_v, VFUNARY1_FUNCT6, VFCLASS_V)
    378  DEFINE_VFUNARY(vfsqrt_v, VFUNARY1_FUNCT6, VFSQRT_V)
    379  DEFINE_VFUNARY(vfrsqrt7_v, VFUNARY1_FUNCT6, VFRSQRT7_V)
    380  DEFINE_VFUNARY(vfrec7_v, VFUNARY1_FUNCT6, VFREC7_V)
    381 #  undef DEFINE_VFUNARY
    382 
         // Pseudo-instructions built from the primitives above.
         // vnot: bitwise complement via xor with all-ones immediate.
    383  void vnot_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
    384    vxor_vi(dst, src, -1, mask);
    385  }
    386 
         // vneg: 0 - src via reverse-subtract from the zero register.
    387  void vneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
    388    vrsub_vx(dst, src, zero_reg, mask);
    389  }
    390 
         // FP negate/abs via sign-injection with src as both operands.
    391  void vfneg_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
    392    vfsngjn_vv(dst, src, src, mask);
    393  }
    394  void vfabs_vv(VRegister dst, VRegister src, MaskType mask = NoMask) {
    395    vfsngjx_vv(dst, src, src, mask);
    396  }
    397  void vfirst_m(Register rd, VRegister vs2, MaskType mask = NoMask);
    398 
    399  void vcpop_m(Register rd, VRegister vs2, MaskType mask = NoMask);
    400 
    401 protected:
    402  void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
    403               TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
    404 
    405  void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
    406                TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
    407 
    408  inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
    409                        TailAgnosticType tail = tu,
    410                        MaskAgnosticType mask = mu) {
         // Fix: forward the caller's tail/mask policy.  Previously this
         // hard-coded tu/mu, silently ignoring both parameters.
    411    vsetvli(rd, zero_reg, vsew, vlmul, tail, mask);
    412  }
    413 
    414  inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
    415                     MaskAgnosticType mask = mu) {
         // Fix: forward tail/mask instead of hard-coded tu/mu (same defect
         // as vsetvlmax above).
    416    vsetvli(zero_reg, zero_reg, vsew, vlmul, tail, mask);
    417  }
    418 
    419  void vsetvl(Register rd, Register rs1, Register rs2);
    420 
    421  // ----------------------------RVV------------------------------------------
    422  // vsetvl
    423  void GenInstrV(Register rd, Register rs1, Register rs2);
    424  // vsetvli
    425  void GenInstrV(Register rd, Register rs1, uint32_t zimm);
    426  // OPIVV OPFVV OPMVV
    427  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
    428                 VRegister vs1, VRegister vs2, MaskType mask = NoMask);
    429  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd, int8_t vs1,
    430                 VRegister vs2, MaskType mask = NoMask);
    431  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
    432                 VRegister vs2, MaskType mask = NoMask);
    433  // OPMVV OPFVV
    434  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd,
    435                 VRegister vs1, VRegister vs2, MaskType mask = NoMask);
    436  // OPFVV
    437  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, FPURegister fd,
    438                 VRegister vs1, VRegister vs2, MaskType mask = NoMask);
    439 
    440  // OPIVX OPMVX
    441  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
    442                 Register rs1, VRegister vs2, MaskType mask = NoMask);
    443  // OPFVF
    444  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
    445                 FPURegister fs1, VRegister vs2, MaskType mask = NoMask);
    446  // OPMVX
    447  void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
    448                 MaskType mask = NoMask);
    449  // OPIVI
    450  void GenInstrV(uint8_t funct6, VRegister vd, int8_t simm5, VRegister vs2,
    451                 MaskType mask = NoMask);
    452 
    453  // VL VS
    454  void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
    455                 uint8_t umop, MaskType mask, uint8_t IsMop, bool IsMew,
    456                 uint8_t Nf);
    457 
    458  void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
    459                 Register rs2, MaskType mask, uint8_t IsMop, bool IsMew,
    460                 uint8_t Nf);
    461  // VL VS AMO
    462  void GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd, Register rs1,
    463                 VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
    464                 uint8_t Nf);
    465  // vmv_xs vcpop_m vfirst_m
    466  void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, Register rd, uint8_t vs1,
    467                 VRegister vs2, MaskType mask);
    468 };
    469 
    470 class LoadStoreLaneParams {
         // Maps a machine representation plus lane index to the element
         // size (sz) and a wrapped lane index for lane-wise vector
         // load/store emission.
    471 public:
    472  int sz;
    473  uint8_t laneidx;
    474 
         // Defined out of line.  NOTE(review): the mapping from
         // MachineRepresentation to sz/lanes is not visible in this header —
         // see the implementation file for the actual table.
    475  LoadStoreLaneParams(MachineRepresentation rep, uint8_t laneidx);
    476 
    477 private:
         // Wraps laneidx modulo the lane count so the stored index always
         // addresses a valid lane.
    478  LoadStoreLaneParams(uint8_t laneidx, int sz, int lanes)
    479      : sz(sz), laneidx(laneidx % lanes) {}
    480 };
    481 }  // namespace jit
    482 }  // namespace js
    483 #endif
    484 #endif  // jit_riscv64_extension_Extension_riscv_v_h_