tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

extension-riscv-v.cc (35345B)


// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "jit/riscv64/extension/extension-riscv-v.h"

#ifdef CAN_USE_RVV
#  include "src/codegen/assembler.h"
#  include "jit/riscv64/constant/Constant-riscv64.h"
#  include "jit/riscv64/extension/register-riscv.h"

namespace js {
namespace jit {

// RVV

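// Encoding note (a summary of the RVV 1.0 spec, for orientation; the
// authoritative shift/mask constants live in Constant-riscv64.h):
// vector-arithmetic instructions are 32 bits wide and split as
//   funct6[31:26] | vm[25] | vs2[24:20] | vs1/rs1/imm5[19:15] |
//   funct3[14:12] | vd[11:7] | opcode[6:0]
// The GenInstrV overloads below assemble exactly these fields; the
// OP_IVV/OP_IVX/OP_IVI/OP_MVV/OP_MVX/OP_FVV/OP_FVF opcodes select the
// funct3 sub-category.
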
void AssemblerRISCVV::vredmaxu_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                  MaskType mask) {
  GenInstrV(VREDMAXU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vredmax_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                 MaskType mask) {
  GenInstrV(VREDMAX_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vredmin_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                 MaskType mask) {
  GenInstrV(VREDMIN_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vredminu_vs(VRegister vd, VRegister vs2, VRegister vs1,
                                  MaskType mask) {
  GenInstrV(VREDMINU_FUNCT6, OP_MVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vmv_vv(VRegister vd, VRegister vs1) {
  GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
}

void AssemblerRISCVV::vmv_vx(VRegister vd, Register rs1) {
  GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, v0, NoMask);
}

void AssemblerRISCVV::vmv_vi(VRegister vd, uint8_t simm5) {
  GenInstrV(VMV_FUNCT6, vd, simm5, v0, NoMask);
}

void AssemblerRISCVV::vmv_xs(Register rd, VRegister vs2) {
  GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b00000, vs2, NoMask);
}

void AssemblerRISCVV::vmv_sx(VRegister vd, Register rs1) {
  GenInstrV(VRXUNARY0_FUNCT6, OP_MVX, vd, rs1, v0, NoMask);
}

void AssemblerRISCVV::vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2) {
  GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
}

void AssemblerRISCVV::vmerge_vx(VRegister vd, Register rs1, VRegister vs2) {
  GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
}

void AssemblerRISCVV::vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
  GenInstrV(VMV_FUNCT6, vd, imm5, vs2, Mask);
}

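// Note: vmv.v.* and vmerge share VMV_FUNCT6; per the RVV spec they are
// distinguished only by the vm bit. With vm = 1 (NoMask above) and vs2 = v0
// the instruction is an unconditional move; with vm = 0 (Mask) it selects
// between vs2 and the vs1/rs1/imm operand under the v0 mask. Illustrative
// sketch only (assumes the register names and defaulted arguments declared
// in the header):
//
//   vmv_vi(v2, 0);                  // v2[i] = 0
//   vmseq_vi(v0, v1, 0, NoMask);    // v0.mask[i] = (v1[i] == 0)
//   vmerge_vx(v2, a0, v3);          // v2[i] = v0.mask[i] ? a0 : v3[i]
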
void AssemblerRISCVV::vadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
  GenInstrV(VADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
}

void AssemblerRISCVV::vadc_vx(VRegister vd, Register rs1, VRegister vs2) {
  GenInstrV(VADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
}

void AssemblerRISCVV::vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
  GenInstrV(VADC_FUNCT6, vd, imm5, vs2, Mask);
}

void AssemblerRISCVV::vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
  GenInstrV(VMADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
}

void AssemblerRISCVV::vmadc_vx(VRegister vd, Register rs1, VRegister vs2) {
  GenInstrV(VMADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
}

void AssemblerRISCVV::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
  GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask);
}

void AssemblerRISCVV::vrgather_vv(VRegister vd, VRegister vs2, VRegister vs1,
                                  MaskType mask) {
  DCHECK_NE(vd, vs1);
  DCHECK_NE(vd, vs2);
  GenInstrV(VRGATHER_FUNCT6, OP_IVV, vd, vs1, vs2, mask);
}

void AssemblerRISCVV::vrgather_vi(VRegister vd, VRegister vs2, int8_t imm5,
                                  MaskType mask) {
  DCHECK_NE(vd, vs2);
  GenInstrV(VRGATHER_FUNCT6, vd, imm5, vs2, mask);
}

void AssemblerRISCVV::vrgather_vx(VRegister vd, VRegister vs2, Register rs1,
                                  MaskType mask) {
  DCHECK_NE(vd, vs2);
  GenInstrV(VRGATHER_FUNCT6, OP_IVX, vd, rs1, vs2, mask);
}

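// The DCHECK_NE guards above mirror the RVV spec's overlap constraint:
// vrgather's destination group may not overlap either source group, since
// elements are permuted (vd[i] = vs2[vs1[i]]) and an in-place gather could
// read already-overwritten data.
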
void AssemblerRISCVV::vwaddu_wx(VRegister vd, VRegister vs2, Register rs1,
                                MaskType mask) {
  GenInstrV(VWADDUW_FUNCT6, OP_MVX, vd, rs1, vs2, mask);
}

void AssemblerRISCVV::vid_v(VRegister vd, MaskType mask) {
  GenInstrV(VMUNARY0_FUNCT6, OP_MVV, vd, VID_V, v0, mask);
}

#  define DEFINE_OPIVV(name, funct6)                                 \
    void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2,     \
                                    VRegister vs1, MaskType mask) {  \
      GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask);                 \
    }

#  define DEFINE_OPFVV(name, funct6)                                 \
    void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2,     \
                                    VRegister vs1, MaskType mask) {  \
      GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask);                 \
    }

#  define DEFINE_OPFWV(name, funct6)                                 \
    void AssemblerRISCVV::name##_wv(VRegister vd, VRegister vs2,     \
                                    VRegister vs1, MaskType mask) {  \
      GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask);                 \
    }

#  define DEFINE_OPFRED(name, funct6)                                \
    void AssemblerRISCVV::name##_vs(VRegister vd, VRegister vs2,     \
                                    VRegister vs1, MaskType mask) {  \
      GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask);                 \
    }

#  define DEFINE_OPIVX(name, funct6)                                           \
    void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
                                    MaskType mask) {                           \
      GenInstrV(funct6, OP_IVX, vd, rs1, vs2, mask);                           \
    }

#  define DEFINE_OPIVI(name, funct6)                                          \
    void AssemblerRISCVV::name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
                                    MaskType mask) {                          \
      GenInstrV(funct6, vd, imm5, vs2, mask);                                 \
    }

#  define DEFINE_OPMVV(name, funct6)                                 \
    void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs2,     \
                                    VRegister vs1, MaskType mask) {  \
      GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask);                 \
    }

// DEFINE_OPMVX expands to a call of the overload
//   void GenInstrV(uint8_t funct6, OpcodeRISCVV opcode, VRegister vd,
//                  Register rs1, VRegister vs2, MaskType mask = NoMask);
#  define DEFINE_OPMVX(name, funct6)                                           \
    void AssemblerRISCVV::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
                                    MaskType mask) {                           \
      GenInstrV(funct6, OP_MVX, vd, rs1, vs2, mask);                           \
    }

#  define DEFINE_OPFVF(name, funct6)                                  \
    void AssemblerRISCVV::name##_vf(VRegister vd, VRegister vs2,      \
                                    FPURegister fs1, MaskType mask) { \
      GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask);                  \
    }

#  define DEFINE_OPFWF(name, funct6)                                  \
    void AssemblerRISCVV::name##_wf(VRegister vd, VRegister vs2,      \
                                    FPURegister fs1, MaskType mask) { \
      GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask);                  \
    }

#  define DEFINE_OPFVV_FMA(name, funct6)                             \
    void AssemblerRISCVV::name##_vv(VRegister vd, VRegister vs1,     \
                                    VRegister vs2, MaskType mask) {  \
      GenInstrV(funct6, OP_FVV, vd, vs1, vs2, mask);                 \
    }

#  define DEFINE_OPFVF_FMA(name, funct6)                             \
    void AssemblerRISCVV::name##_vf(VRegister vd, FPURegister fs1,   \
                                    VRegister vs2, MaskType mask) {  \
      GenInstrV(funct6, OP_FVF, vd, fs1, vs2, mask);                 \
    }

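// Note the argument order: the FMA macros above take (vd, vs1/fs1, vs2),
// matching the spec's accumulate forms, whereas the other OP* macros take
// (vd, vs2, vs1) and swap the operands when calling GenInstrV. Either way,
// vs1 ends up in bits [19:15] and vs2 in bits [24:20].
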
// vector integer extension
#  define DEFINE_OPMVV_VIE(name, vs1)                                        \
    void AssemblerRISCVV::name(VRegister vd, VRegister vs2, MaskType mask) { \
      GenInstrV(VXUNARY0_FUNCT6, OP_MVV, vd, vs1, vs2, mask);                \
    }

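// For VXUNARY0 the vs1 field is repurposed as a function code: per the RVV
// spec, 0b00010/0b00011 select vzext.vf8/vsext.vf8, 0b00100/0b00101 select
// vzext.vf4/vsext.vf4, and 0b00110/0b00111 select vzext.vf2/vsext.vf2 (see
// the DEFINE_OPMVV_VIE instantiations further down).
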
void AssemblerRISCVV::vfmv_vf(VRegister vd, FPURegister fs1, MaskType mask) {
  GenInstrV(VMV_FUNCT6, OP_FVF, vd, fs1, v0, mask);
}

void AssemblerRISCVV::vfmv_fs(FPURegister fd, VRegister vs2) {
  GenInstrV(VWFUNARY0_FUNCT6, OP_FVV, fd, v0, vs2, NoMask);
}

void AssemblerRISCVV::vfmv_sf(VRegister vd, FPURegister fs) {
  GenInstrV(VRFUNARY0_FUNCT6, OP_FVF, vd, fs, v0, NoMask);
}

DEFINE_OPIVV(vadd, VADD_FUNCT6)
DEFINE_OPIVX(vadd, VADD_FUNCT6)
DEFINE_OPIVI(vadd, VADD_FUNCT6)
DEFINE_OPIVV(vsub, VSUB_FUNCT6)
DEFINE_OPIVX(vsub, VSUB_FUNCT6)
DEFINE_OPMVX(vdiv, VDIV_FUNCT6)
DEFINE_OPMVX(vdivu, VDIVU_FUNCT6)
DEFINE_OPMVX(vmul, VMUL_FUNCT6)
DEFINE_OPMVX(vmulhu, VMULHU_FUNCT6)
DEFINE_OPMVX(vmulhsu, VMULHSU_FUNCT6)
DEFINE_OPMVX(vmulh, VMULH_FUNCT6)
DEFINE_OPMVV(vdiv, VDIV_FUNCT6)
DEFINE_OPMVV(vdivu, VDIVU_FUNCT6)
DEFINE_OPMVV(vmul, VMUL_FUNCT6)
DEFINE_OPMVV(vmulhu, VMULHU_FUNCT6)
DEFINE_OPMVV(vmulhsu, VMULHSU_FUNCT6)
DEFINE_OPMVV(vwmul, VWMUL_FUNCT6)
DEFINE_OPMVV(vwmulu, VWMULU_FUNCT6)
DEFINE_OPMVV(vmulh, VMULH_FUNCT6)
DEFINE_OPMVV(vwadd, VWADD_FUNCT6)
DEFINE_OPMVV(vwaddu, VWADDU_FUNCT6)
DEFINE_OPMVV(vcompress, VCOMPRESS_FUNCT6)
DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
DEFINE_OPIVX(vsaddu, VSADDU_FUNCT6)
DEFINE_OPIVV(vsaddu, VSADDU_FUNCT6)
DEFINE_OPIVI(vsaddu, VSADDU_FUNCT6)
DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
DEFINE_OPIVV(vminu, VMINU_FUNCT6)
DEFINE_OPIVX(vminu, VMINU_FUNCT6)
DEFINE_OPIVV(vmin, VMIN_FUNCT6)
DEFINE_OPIVX(vmin, VMIN_FUNCT6)
DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
DEFINE_OPIVV(vmax, VMAX_FUNCT6)
DEFINE_OPIVX(vmax, VMAX_FUNCT6)
DEFINE_OPIVV(vand, VAND_FUNCT6)
DEFINE_OPIVX(vand, VAND_FUNCT6)
DEFINE_OPIVI(vand, VAND_FUNCT6)
DEFINE_OPIVV(vor, VOR_FUNCT6)
DEFINE_OPIVX(vor, VOR_FUNCT6)
DEFINE_OPIVI(vor, VOR_FUNCT6)
DEFINE_OPIVV(vxor, VXOR_FUNCT6)
DEFINE_OPIVX(vxor, VXOR_FUNCT6)
DEFINE_OPIVI(vxor, VXOR_FUNCT6)

DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)

DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)

DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)

DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)

DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)

DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)

DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)

DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)

DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)

DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
DEFINE_OPIVI(vsrl, VSRL_FUNCT6)

DEFINE_OPIVV(vsra, VSRA_FUNCT6)
DEFINE_OPIVX(vsra, VSRA_FUNCT6)
DEFINE_OPIVI(vsra, VSRA_FUNCT6)

DEFINE_OPIVV(vsll, VSLL_FUNCT6)
DEFINE_OPIVX(vsll, VSLL_FUNCT6)
DEFINE_OPIVI(vsll, VSLL_FUNCT6)

DEFINE_OPIVV(vsmul, VSMUL_FUNCT6)
DEFINE_OPIVX(vsmul, VSMUL_FUNCT6)

DEFINE_OPFVV(vfadd, VFADD_FUNCT6)
DEFINE_OPFVF(vfadd, VFADD_FUNCT6)
DEFINE_OPFVV(vfsub, VFSUB_FUNCT6)
DEFINE_OPFVF(vfsub, VFSUB_FUNCT6)
DEFINE_OPFVV(vfdiv, VFDIV_FUNCT6)
DEFINE_OPFVF(vfdiv, VFDIV_FUNCT6)
DEFINE_OPFVV(vfmul, VFMUL_FUNCT6)
DEFINE_OPFVF(vfmul, VFMUL_FUNCT6)
DEFINE_OPFVV(vmfeq, VMFEQ_FUNCT6)
DEFINE_OPFVV(vmfne, VMFNE_FUNCT6)
DEFINE_OPFVV(vmflt, VMFLT_FUNCT6)
DEFINE_OPFVV(vmfle, VMFLE_FUNCT6)
DEFINE_OPFVV(vfmax, VFMAX_FUNCT6)
DEFINE_OPFVV(vfmin, VFMIN_FUNCT6)

// Vector Widening Floating-Point Add/Subtract Instructions
DEFINE_OPFVV(vfwadd, VFWADD_FUNCT6)
DEFINE_OPFVF(vfwadd, VFWADD_FUNCT6)
DEFINE_OPFVV(vfwsub, VFWSUB_FUNCT6)
DEFINE_OPFVF(vfwsub, VFWSUB_FUNCT6)
DEFINE_OPFWV(vfwadd, VFWADD_W_FUNCT6)
DEFINE_OPFWF(vfwadd, VFWADD_W_FUNCT6)
DEFINE_OPFWV(vfwsub, VFWSUB_W_FUNCT6)
DEFINE_OPFWF(vfwsub, VFWSUB_W_FUNCT6)

// Vector Widening Floating-Point Reduction Instructions
DEFINE_OPFVV(vfwredusum, VFWREDUSUM_FUNCT6)
DEFINE_OPFVV(vfwredosum, VFWREDOSUM_FUNCT6)

// Vector Widening Floating-Point Multiply
DEFINE_OPFVV(vfwmul, VFWMUL_FUNCT6)
DEFINE_OPFVF(vfwmul, VFWMUL_FUNCT6)

DEFINE_OPFRED(vfredmax, VFREDMAX_FUNCT6)

DEFINE_OPFVV(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVF(vfsngj, VFSGNJ_FUNCT6)
DEFINE_OPFVV(vfsngjn, VFSGNJN_FUNCT6)
DEFINE_OPFVF(vfsngjn, VFSGNJN_FUNCT6)
DEFINE_OPFVV(vfsngjx, VFSGNJX_FUNCT6)
DEFINE_OPFVF(vfsngjx, VFSGNJX_FUNCT6)

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
DEFINE_OPFVV_FMA(vfmadd, VFMADD_FUNCT6)
DEFINE_OPFVF_FMA(vfmadd, VFMADD_FUNCT6)
DEFINE_OPFVV_FMA(vfmsub, VFMSUB_FUNCT6)
DEFINE_OPFVF_FMA(vfmsub, VFMSUB_FUNCT6)
DEFINE_OPFVV_FMA(vfmacc, VFMACC_FUNCT6)
DEFINE_OPFVF_FMA(vfmacc, VFMACC_FUNCT6)
DEFINE_OPFVV_FMA(vfmsac, VFMSAC_FUNCT6)
DEFINE_OPFVF_FMA(vfmsac, VFMSAC_FUNCT6)
DEFINE_OPFVV_FMA(vfnmadd, VFNMADD_FUNCT6)
DEFINE_OPFVF_FMA(vfnmadd, VFNMADD_FUNCT6)
DEFINE_OPFVV_FMA(vfnmsub, VFNMSUB_FUNCT6)
DEFINE_OPFVF_FMA(vfnmsub, VFNMSUB_FUNCT6)
DEFINE_OPFVV_FMA(vfnmacc, VFNMACC_FUNCT6)
DEFINE_OPFVF_FMA(vfnmacc, VFNMACC_FUNCT6)
DEFINE_OPFVV_FMA(vfnmsac, VFNMSAC_FUNCT6)
DEFINE_OPFVF_FMA(vfnmsac, VFNMSAC_FUNCT6)

// Vector Widening Floating-Point Fused Multiply-Add Instructions
DEFINE_OPFVV_FMA(vfwmacc, VFWMACC_FUNCT6)
DEFINE_OPFVF_FMA(vfwmacc, VFWMACC_FUNCT6)
DEFINE_OPFVV_FMA(vfwnmacc, VFWNMACC_FUNCT6)
DEFINE_OPFVF_FMA(vfwnmacc, VFWNMACC_FUNCT6)
DEFINE_OPFVV_FMA(vfwmsac, VFWMSAC_FUNCT6)
DEFINE_OPFVF_FMA(vfwmsac, VFWMSAC_FUNCT6)
DEFINE_OPFVV_FMA(vfwnmsac, VFWNMSAC_FUNCT6)
DEFINE_OPFVF_FMA(vfwnmsac, VFWNMSAC_FUNCT6)

// Vector Narrowing Fixed-Point Clip Instructions
DEFINE_OPIVV(vnclip, VNCLIP_FUNCT6)
DEFINE_OPIVX(vnclip, VNCLIP_FUNCT6)
DEFINE_OPIVI(vnclip, VNCLIP_FUNCT6)
DEFINE_OPIVV(vnclipu, VNCLIPU_FUNCT6)
DEFINE_OPIVX(vnclipu, VNCLIPU_FUNCT6)
DEFINE_OPIVI(vnclipu, VNCLIPU_FUNCT6)

// Vector Integer Extension
DEFINE_OPMVV_VIE(vzext_vf8, 0b00010)
DEFINE_OPMVV_VIE(vsext_vf8, 0b00011)
DEFINE_OPMVV_VIE(vzext_vf4, 0b00100)
DEFINE_OPMVV_VIE(vsext_vf4, 0b00101)
DEFINE_OPMVV_VIE(vzext_vf2, 0b00110)
DEFINE_OPMVV_VIE(vsext_vf2, 0b00111)

#  undef DEFINE_OPIVI
#  undef DEFINE_OPIVV
#  undef DEFINE_OPIVX
#  undef DEFINE_OPMVV
#  undef DEFINE_OPMVX
#  undef DEFINE_OPFRED
#  undef DEFINE_OPFVV
#  undef DEFINE_OPFWV
#  undef DEFINE_OPFVF
#  undef DEFINE_OPFWF
#  undef DEFINE_OPFVV_FMA
#  undef DEFINE_OPFVF_FMA
#  undef DEFINE_OPMVV_VIE

void AssemblerRISCVV::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
                              TailAgnosticType tail, MaskAgnosticType mask) {
  int32_t zimm = GenZimm(vsew, vlmul, tail, mask);
  Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x0 << 31;
  emit(instr);
}

void AssemblerRISCVV::vsetivli(Register rd, uint8_t uimm, VSew vsew,
                               Vlmul vlmul, TailAgnosticType tail,
                               MaskAgnosticType mask) {
  MOZ_ASSERT(is_uint5(uimm));
  int32_t zimm = GenZimm(vsew, vlmul, tail, mask) & 0x3FF;
  Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
                ((uimm & 0x1F) << kRvvUimmShift) |
                (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x3 << 30;
  emit(instr);
}

void AssemblerRISCVV::vsetvl(Register rd, Register rs1, Register rs2) {
  Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                ((rs2.code() & 0x1F) << kRvvRs2Shift) | 0x40 << 25;
  emit(instr);
}

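// All three vset* forms share funct3 = 0b111 (the literal 0x7 << 12 above)
// under the OP_V major opcode and differ only in their top bits: vsetvli has
// bit 31 = 0, vsetivli has bits [31:30] = 0b11 (0x3 << 30), and vsetvl has
// funct7 = 0b1000000 (0x40 << 25). GenZimm packs vsew/vlmul plus the
// tail/mask agnostic flags into the vtype immediate.
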
uint8_t vsew_switch(VSew vsew) {
  uint8_t width;
  switch (vsew) {
    case E8:
      width = 0b000;
      break;
    case E16:
      width = 0b101;
      break;
    case E32:
      width = 0b110;
      break;
    default:
      width = 0b111;
      break;
  }
  return width;
}

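// These are the memory-access width codes of the vector load/store encoding
// (which shares the funct3 space with scalar FP loads/stores): 0b000 = 8-bit,
// 0b101 = 16-bit, 0b110 = 32-bit, 0b111 = 64-bit. That is why the default
// case (E64) yields 0b111.
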
// OPIVV OPFVV OPMVV
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                VRegister vd, VRegister vs1, VRegister vs2,
                                MaskType mask) {
  MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((vs1.code() & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                VRegister vd, int8_t vs1, VRegister vs2,
                                MaskType mask) {
  MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((vs1 & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPMVV OPFVV
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                Register rd, VRegister vs1, VRegister vs2,
                                MaskType mask) {
  MOZ_ASSERT(opcode == OP_MVV || opcode == OP_FVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((rd.code() & 0x1F) << kRvvVdShift) |
                ((vs1.code() & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPFVV
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                FPURegister fd, VRegister vs1, VRegister vs2,
                                MaskType mask) {
  MOZ_ASSERT(opcode == OP_FVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((fd.encoding() & 0x1F) << kRvvVdShift) |
                ((vs1.code() & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPIVX OPMVX
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                VRegister vd, Register rs1, VRegister vs2,
                                MaskType mask) {
  MOZ_ASSERT(opcode == OP_IVX || opcode == OP_MVX);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPFVF
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                VRegister vd, FPURegister fs1, VRegister vs2,
                                MaskType mask) {
  MOZ_ASSERT(opcode == OP_FVF);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                ((fs1.encoding() & 0x1F) << kRvvRs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPMVX
void AssemblerRISCVV::GenInstrV(uint8_t funct6, Register rd, Register rs1,
                                VRegister vs2, MaskType mask) {
  Instr instr = (funct6 << kRvvFunct6Shift) | OP_MVX | (mask << kRvvVmShift) |
                ((rd.code() & 0x1F) << kRvvVdShift) |
                ((rs1.code() & 0x1F) << kRvvRs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// OPIVI
void AssemblerRISCVV::GenInstrV(uint8_t funct6, VRegister vd, int8_t imm5,
                                VRegister vs2, MaskType mask) {
  MOZ_ASSERT(is_uint5(imm5) || is_int5(imm5));
  Instr instr = (funct6 << kRvvFunct6Shift) | OP_IVI | (mask << kRvvVmShift) |
                ((vd.code() & 0x1F) << kRvvVdShift) |
                (((uint32_t)imm5 << kRvvImm5Shift) & kRvvImm5Mask) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

// VL VS
void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
                                Register rs1, uint8_t umop, MaskType mask,
                                uint8_t IsMop, bool IsMew, uint8_t Nf) {
  MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP);
  Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
                ((width << kRvvWidthShift) & kRvvWidthMask) |
                ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
                ((umop << kRvvRs2Shift) & kRvvRs2Mask) |
                ((mask << kRvvVmShift) & kRvvVmMask) |
                ((IsMop << kRvvMopShift) & kRvvMopMask) |
                ((IsMew << kRvvMewShift) & kRvvMewMask) |
                ((Nf << kRvvNfShift) & kRvvNfMask);
  emit(instr);
}

void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
                                Register rs1, Register rs2, MaskType mask,
                                uint8_t IsMop, bool IsMew, uint8_t Nf) {
  MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP);
  Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
                ((width << kRvvWidthShift) & kRvvWidthMask) |
                ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
                ((rs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
                ((mask << kRvvVmShift) & kRvvVmMask) |
                ((IsMop << kRvvMopShift) & kRvvMopMask) |
                ((IsMew << kRvvMewShift) & kRvvMewMask) |
                ((Nf << kRvvNfShift) & kRvvNfMask);
  emit(instr);
}

// VL VS AMO
void AssemblerRISCVV::GenInstrV(BaseOpcode opcode, uint8_t width, VRegister vd,
                                Register rs1, VRegister vs2, MaskType mask,
                                uint8_t IsMop, bool IsMew, uint8_t Nf) {
  MOZ_ASSERT(opcode == LOAD_FP || opcode == STORE_FP || opcode == AMO);
  Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
                ((width << kRvvWidthShift) & kRvvWidthMask) |
                ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
                ((vs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
                ((mask << kRvvVmShift) & kRvvVmMask) |
                ((IsMop << kRvvMopShift) & kRvvMopMask) |
                ((IsMew << kRvvMewShift) & kRvvMewMask) |
                ((Nf << kRvvNfShift) & kRvvNfMask);
  emit(instr);
}
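
// Vector load/store layout (RVV 1.0): nf[31:29] | mew[28] | mop[27:26] |
// vm[25] | lumop/rs2/vs2[24:20] | rs1[19:15] | width[14:12] | vd/vs3[11:7] |
// opcode[6:0], where mop selects the addressing mode: 0b00 unit-stride,
// 0b01 indexed-unordered, 0b10 strided, 0b11 indexed-ordered. The three
// overloads above differ only in what fills bits [24:20].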
// vmv_xs vcpop_m vfirst_m
void AssemblerRISCVV::GenInstrV(uint8_t funct6, OpcodeRISCVV opcode,
                                Register rd, uint8_t vs1, VRegister vs2,
                                MaskType mask) {
  MOZ_ASSERT(opcode == OP_MVV);
  Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
                ((rd.code() & 0x1F) << kRvvVdShift) |
                ((vs1 & 0x1F) << kRvvVs1Shift) |
                ((vs2.code() & 0x1F) << kRvvVs2Shift);
  emit(instr);
}

void AssemblerRISCVV::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
                         MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b000);
}

void AssemblerRISCVV::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b000);
}

void AssemblerRISCVV::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0);
}

void AssemblerRISCVV::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
                         MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b000);
}

void AssemblerRISCVV::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, 0, 0b000);
}

void AssemblerRISCVV::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, 0, 0b000);
}

void AssemblerRISCVV::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
                          MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, 0, 0b000);
}

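// mop values used above: vl/vs pass 0b00 (unit-stride; lumop/sumop fills
// bits [24:20]), vls/vss pass 0b10 (strided, byte stride in rs2), and
// vlx/vsx pass 0b11 (indexed-ordered, index vector in vs2). vsu passes
// 0b01, which for stores is the indexed-unordered form in RVV 1.0.
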
void AssemblerRISCVV::vlseg2(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b001);
}

void AssemblerRISCVV::vlseg3(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b010);
}

void AssemblerRISCVV::vlseg4(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b011);
}

void AssemblerRISCVV::vlseg5(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b100);
}

void AssemblerRISCVV::vlseg6(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b101);
}

void AssemblerRISCVV::vlseg7(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b110);
}

void AssemblerRISCVV::vlseg8(VRegister vd, Register rs1, uint8_t lumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, 0, 0b111);
}

void AssemblerRISCVV::vsseg2(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b001);
}

void AssemblerRISCVV::vsseg3(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b010);
}

void AssemblerRISCVV::vsseg4(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b011);
}

void AssemblerRISCVV::vsseg5(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b100);
}

void AssemblerRISCVV::vsseg6(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b101);
}

void AssemblerRISCVV::vsseg7(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b110);
}

void AssemblerRISCVV::vsseg8(VRegister vd, Register rs1, uint8_t sumop,
                             VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, 0, 0b111);
}

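// The last GenInstrV argument is the Nf field, which encodes the number of
// segment fields minus one: vlseg2/vsseg2 pass 0b001, up through
// vlseg8/vsseg8 with 0b111. The same pattern repeats for the strided
// (vlsseg*/vssseg*) and indexed (vlxseg*/vsxseg*) forms below.
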
void AssemblerRISCVV::vlsseg2(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
}

void AssemblerRISCVV::vlsseg3(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
}

void AssemblerRISCVV::vlsseg4(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
}

void AssemblerRISCVV::vlsseg5(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
}

void AssemblerRISCVV::vlsseg6(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
}

void AssemblerRISCVV::vlsseg7(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
}

void AssemblerRISCVV::vlsseg8(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
}

void AssemblerRISCVV::vssseg2(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b001);
}

void AssemblerRISCVV::vssseg3(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b010);
}

void AssemblerRISCVV::vssseg4(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b011);
}

void AssemblerRISCVV::vssseg5(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b100);
}

void AssemblerRISCVV::vssseg6(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b101);
}

void AssemblerRISCVV::vssseg7(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b110);
}

void AssemblerRISCVV::vssseg8(VRegister vd, Register rs1, Register rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, 0, 0b111);
}

void AssemblerRISCVV::vlxseg2(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
}

void AssemblerRISCVV::vlxseg3(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
}

void AssemblerRISCVV::vlxseg4(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
}

void AssemblerRISCVV::vlxseg5(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
}

void AssemblerRISCVV::vlxseg6(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
}

void AssemblerRISCVV::vlxseg7(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
}

void AssemblerRISCVV::vlxseg8(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}

void AssemblerRISCVV::vsxseg2(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b001);
}

void AssemblerRISCVV::vsxseg3(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b010);
}

void AssemblerRISCVV::vsxseg4(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b011);
}

void AssemblerRISCVV::vsxseg5(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b100);
}

void AssemblerRISCVV::vsxseg6(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b101);
}

void AssemblerRISCVV::vsxseg7(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b110);
}

void AssemblerRISCVV::vsxseg8(VRegister vd, Register rs1, VRegister rs2,
                              VSew vsew, MaskType mask) {
  uint8_t width = vsew_switch(vsew);
  GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, 0, 0b111);
}

void AssemblerRISCVV::vfirst_m(Register rd, VRegister vs2, MaskType mask) {
  GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10001, vs2, mask);
}

void AssemblerRISCVV::vcpop_m(Register rd, VRegister vs2, MaskType mask) {
  GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, 0b10000, vs2, mask);
}

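// For VWXUNARY0 the vs1 field again acts as a function code shared with
// vmv.x.s: 0b00000 = vmv.x.s (see vmv_xs above), 0b10000 = vcpop.m (count
// of set mask bits), 0b10001 = vfirst.m (index of the first set mask bit,
// or -1 if none is set).
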
LoadStoreLaneParams::LoadStoreLaneParams(MachineRepresentation rep,
                                         uint8_t laneidx) {
  switch (rep) {
    case MachineRepresentation::kWord8:
      *this = LoadStoreLaneParams(laneidx, 8, kRvvVLEN / 16);
      break;
    case MachineRepresentation::kWord16:
      *this = LoadStoreLaneParams(laneidx, 16, kRvvVLEN / 8);
      break;
    case MachineRepresentation::kWord32:
      *this = LoadStoreLaneParams(laneidx, 32, kRvvVLEN / 4);
      break;
    case MachineRepresentation::kWord64:
      *this = LoadStoreLaneParams(laneidx, 64, kRvvVLEN / 2);
      break;
    default:
      UNREACHABLE();
  }
}

}  // namespace jit
}  // namespace js
#endif  // CAN_USE_RVV