tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

Instructions-vixl.cpp (40291B)


      1 // Copyright 2015, VIXL authors
      2 // All rights reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions are met:
      6 //
      7 //   * Redistributions of source code must retain the above copyright notice,
      8 //     this list of conditions and the following disclaimer.
      9 //   * Redistributions in binary form must reproduce the above copyright notice,
     10 //     this list of conditions and the following disclaimer in the documentation
     11 //     and/or other materials provided with the distribution.
     12 //   * Neither the name of ARM Limited nor the names of its contributors may be
     13 //     used to endorse or promote products derived from this software without
     14 //     specific prior written permission.
     15 //
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
     17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
     20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26 
     27 #include "jit/arm64/vixl/Instructions-vixl.h"
     28 
     29 #include "jit/arm64/vixl/Assembler-vixl.h"
     30 
     31 namespace vixl {
     32 
     33 static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
     34                                    uint64_t value,
     35                                    unsigned width) {
     36  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
     37              (width == 32));
     38  VIXL_ASSERT((reg_size == kBRegSize) || (reg_size == kHRegSize) ||
     39              (reg_size == kSRegSize) || (reg_size == kDRegSize));
     40  uint64_t result = value & ((UINT64_C(1) << width) - 1);
     41  for (unsigned i = width; i < reg_size; i *= 2) {
     42    result |= (result << i);
     43  }
     44  return result;
     45 }
     46 
     47 bool Instruction::CanTakeSVEMovprfx(const char* form,
     48                                    const Instruction* movprfx) const {
     49  return CanTakeSVEMovprfx(Hash(form), movprfx);
     50 }
     51 
// Check whether this instruction (identified by `form_hash`, the hashed
// mnemonic form produced by Hash() / the "..."_h literal) may legally be
// prefixed by the given MOVPRFX instruction. Encodes the architectural
// constraints:
//  - MOVPRFX's destination must equal this instruction's destination
//    (zd_matches), and for most forms must not alias this instruction's
//    source registers (zd_isnt_zn / zd_isnt_zm);
//  - only an unpredicated MOVPRFX is accepted by some forms; a predicated
//    MOVPRFX must additionally agree on the governing predicate
//    (pg_matches_low8) and on the element size (vform_matches).
bool Instruction::CanTakeSVEMovprfx(uint32_t form_hash,
                                    const Instruction* movprfx) const {
  // MOVPRFX exists in exactly two flavours: predicated and unpredicated.
  bool movprfx_is_predicated = movprfx->Mask(SVEMovprfxMask) == MOVPRFX_z_p_z;
  bool movprfx_is_unpredicated =
      movprfx->Mask(SVEConstructivePrefix_UnpredicatedMask) == MOVPRFX_z_z;
  VIXL_ASSERT(movprfx_is_predicated != movprfx_is_unpredicated);

  int movprfx_zd = movprfx->GetRd();
  // Predicate and vector format only constrain the predicated flavour.
  int movprfx_pg = movprfx_is_predicated ? movprfx->GetPgLow8() : -1;
  VectorFormat movprfx_vform =
      movprfx_is_predicated ? movprfx->GetSVEVectorFormat() : kFormatUndefined;

  bool pg_matches_low8 = movprfx_pg == GetPgLow8();
  bool vform_matches = movprfx_vform == GetSVEVectorFormat();
  bool zd_matches = movprfx_zd == GetRd();
  bool zd_isnt_zn = movprfx_zd != GetRn();
  bool zd_isnt_zm = movprfx_zd != GetRm();

  switch (form_hash) {
    // Indexed dot products, .s variants: Zm lives in <18:16>, so the
    // destination must be compared against those bits directly.
    case "cdot_z_zzzi_s"_h:
    case "sdot_z_zzzi_s"_h:
    case "sudot_z_zzzi_s"_h:
    case "udot_z_zzzi_s"_h:
    case "usdot_z_zzzi_s"_h:
      return (GetRd() != static_cast<int>(ExtractBits(18, 16))) &&
             movprfx_is_unpredicated && zd_isnt_zn && zd_matches;

    // Indexed dot products, .d variants: Zm lives in <19:16>.
    case "cdot_z_zzzi_d"_h:
    case "sdot_z_zzzi_d"_h:
    case "udot_z_zzzi_d"_h:
      return (GetRd() != static_cast<int>(ExtractBits(19, 16))) &&
             movprfx_is_unpredicated && zd_isnt_zn && zd_matches;

    // Indexed long multiply-accumulates: Zm's position depends on the
    // form, so use the decoded (register, index) pair.
    case "fmlalb_z_zzzi_s"_h:
    case "fmlalt_z_zzzi_s"_h:
    case "fmlslb_z_zzzi_s"_h:
    case "fmlslt_z_zzzi_s"_h:
    case "smlalb_z_zzzi_d"_h:
    case "smlalb_z_zzzi_s"_h:
    case "smlalt_z_zzzi_d"_h:
    case "smlalt_z_zzzi_s"_h:
    case "smlslb_z_zzzi_d"_h:
    case "smlslb_z_zzzi_s"_h:
    case "smlslt_z_zzzi_d"_h:
    case "smlslt_z_zzzi_s"_h:
    case "sqdmlalb_z_zzzi_d"_h:
    case "sqdmlalb_z_zzzi_s"_h:
    case "sqdmlalt_z_zzzi_d"_h:
    case "sqdmlalt_z_zzzi_s"_h:
    case "sqdmlslb_z_zzzi_d"_h:
    case "sqdmlslb_z_zzzi_s"_h:
    case "sqdmlslt_z_zzzi_d"_h:
    case "sqdmlslt_z_zzzi_s"_h:
    case "umlalb_z_zzzi_d"_h:
    case "umlalb_z_zzzi_s"_h:
    case "umlalt_z_zzzi_d"_h:
    case "umlalt_z_zzzi_s"_h:
    case "umlslb_z_zzzi_d"_h:
    case "umlslb_z_zzzi_s"_h:
    case "umlslt_z_zzzi_d"_h:
    case "umlslt_z_zzzi_s"_h:
      return (GetRd() != GetSVEMulLongZmAndIndex().first) &&
             movprfx_is_unpredicated && zd_isnt_zn && zd_matches;

    // Indexed (non-long) multiply-accumulates.
    case "cmla_z_zzzi_h"_h:
    case "cmla_z_zzzi_s"_h:
    case "fcmla_z_zzzi_h"_h:
    case "fcmla_z_zzzi_s"_h:
    case "fmla_z_zzzi_d"_h:
    case "fmla_z_zzzi_h"_h:
    case "fmla_z_zzzi_s"_h:
    case "fmls_z_zzzi_d"_h:
    case "fmls_z_zzzi_h"_h:
    case "fmls_z_zzzi_s"_h:
    case "mla_z_zzzi_d"_h:
    case "mla_z_zzzi_h"_h:
    case "mla_z_zzzi_s"_h:
    case "mls_z_zzzi_d"_h:
    case "mls_z_zzzi_h"_h:
    case "mls_z_zzzi_s"_h:
    case "sqrdcmlah_z_zzzi_h"_h:
    case "sqrdcmlah_z_zzzi_s"_h:
    case "sqrdmlah_z_zzzi_d"_h:
    case "sqrdmlah_z_zzzi_h"_h:
    case "sqrdmlah_z_zzzi_s"_h:
    case "sqrdmlsh_z_zzzi_d"_h:
    case "sqrdmlsh_z_zzzi_h"_h:
    case "sqrdmlsh_z_zzzi_s"_h:
      return (GetRd() != GetSVEMulZmAndIndex().first) &&
             movprfx_is_unpredicated && zd_isnt_zn && zd_matches;

    // Three-register (vector) forms: the destination may alias neither
    // source, and only an unpredicated prefix is allowed.
    case "adclb_z_zzz"_h:
    case "adclt_z_zzz"_h:
    case "bcax_z_zzz"_h:
    case "bsl1n_z_zzz"_h:
    case "bsl2n_z_zzz"_h:
    case "bsl_z_zzz"_h:
    case "cdot_z_zzz"_h:
    case "cmla_z_zzz"_h:
    case "eor3_z_zzz"_h:
    case "eorbt_z_zz"_h:
    case "eortb_z_zz"_h:
    case "fmlalb_z_zzz"_h:
    case "fmlalt_z_zzz"_h:
    case "fmlslb_z_zzz"_h:
    case "fmlslt_z_zzz"_h:
    case "nbsl_z_zzz"_h:
    case "saba_z_zzz"_h:
    case "sabalb_z_zzz"_h:
    case "sabalt_z_zzz"_h:
    case "sbclb_z_zzz"_h:
    case "sbclt_z_zzz"_h:
    case "sdot_z_zzz"_h:
    case "smlalb_z_zzz"_h:
    case "smlalt_z_zzz"_h:
    case "smlslb_z_zzz"_h:
    case "smlslt_z_zzz"_h:
    case "sqdmlalb_z_zzz"_h:
    case "sqdmlalbt_z_zzz"_h:
    case "sqdmlalt_z_zzz"_h:
    case "sqdmlslb_z_zzz"_h:
    case "sqdmlslbt_z_zzz"_h:
    case "sqdmlslt_z_zzz"_h:
    case "sqrdcmlah_z_zzz"_h:
    case "sqrdmlah_z_zzz"_h:
    case "sqrdmlsh_z_zzz"_h:
    case "uaba_z_zzz"_h:
    case "uabalb_z_zzz"_h:
    case "uabalt_z_zzz"_h:
    case "udot_z_zzz"_h:
    case "umlalb_z_zzz"_h:
    case "umlalt_z_zzz"_h:
    case "umlslb_z_zzz"_h:
    case "umlslt_z_zzz"_h:
    case "usdot_z_zzz_s"_h:
    case "fmmla_z_zzz_s"_h:
    case "fmmla_z_zzz_d"_h:
    case "smmla_z_zzz"_h:
    case "ummla_z_zzz"_h:
    case "usmmla_z_zzz"_h:
      return movprfx_is_unpredicated && zd_isnt_zm && zd_isnt_zn && zd_matches;

    // Destructive forms with a single vector source (Zn): only the Zn
    // alias restriction applies.
    case "addp_z_p_zz"_h:
    case "cadd_z_zz"_h:
    case "clasta_z_p_zz"_h:
    case "clastb_z_p_zz"_h:
    case "decd_z_zs"_h:
    case "dech_z_zs"_h:
    case "decw_z_zs"_h:
    case "ext_z_zi_des"_h:
    case "faddp_z_p_zz"_h:
    case "fmaxnmp_z_p_zz"_h:
    case "fmaxp_z_p_zz"_h:
    case "fminnmp_z_p_zz"_h:
    case "fminp_z_p_zz"_h:
    case "ftmad_z_zzi"_h:
    case "incd_z_zs"_h:
    case "inch_z_zs"_h:
    case "incw_z_zs"_h:
    case "insr_z_v"_h:
    case "smaxp_z_p_zz"_h:
    case "sminp_z_p_zz"_h:
    case "splice_z_p_zz_des"_h:
    case "sqcadd_z_zz"_h:
    case "sqdecd_z_zs"_h:
    case "sqdech_z_zs"_h:
    case "sqdecw_z_zs"_h:
    case "sqincd_z_zs"_h:
    case "sqinch_z_zs"_h:
    case "sqincw_z_zs"_h:
    case "srsra_z_zi"_h:
    case "ssra_z_zi"_h:
    case "umaxp_z_p_zz"_h:
    case "uminp_z_p_zz"_h:
    case "uqdecd_z_zs"_h:
    case "uqdech_z_zs"_h:
    case "uqdecw_z_zs"_h:
    case "uqincd_z_zs"_h:
    case "uqinch_z_zs"_h:
    case "uqincw_z_zs"_h:
    case "ursra_z_zi"_h:
    case "usra_z_zi"_h:
    case "xar_z_zzi"_h:
      return movprfx_is_unpredicated && zd_isnt_zn && zd_matches;

    // Immediate / scalar-operand forms: no vector source to alias, so
    // only the destination match is required.
    case "add_z_zi"_h:
    case "and_z_zi"_h:
    case "decp_z_p_z"_h:
    case "eor_z_zi"_h:
    case "incp_z_p_z"_h:
    case "insr_z_r"_h:
    case "mul_z_zi"_h:
    case "orr_z_zi"_h:
    case "smax_z_zi"_h:
    case "smin_z_zi"_h:
    case "sqadd_z_zi"_h:
    case "sqdecp_z_p_z"_h:
    case "sqincp_z_p_z"_h:
    case "sqsub_z_zi"_h:
    case "sub_z_zi"_h:
    case "subr_z_zi"_h:
    case "umax_z_zi"_h:
    case "umin_z_zi"_h:
    case "uqadd_z_zi"_h:
    case "uqdecp_z_p_z"_h:
    case "uqincp_z_p_z"_h:
    case "uqsub_z_zi"_h:
      return movprfx_is_unpredicated && zd_matches;

    case "cpy_z_p_i"_h:
      if (movprfx_is_predicated) {
        if (!vform_matches) return false;
        // CPY's governing predicate is in <19:16>.
        if (movprfx_pg != GetRx<19, 16>()) return false;
      }
      // Only the merging form can take movprfx.
      if (ExtractBit(14) == 0) return false;
      return zd_matches;

    case "fcpy_z_p_i"_h:
      // FCPY's governing predicate is also in <19:16>.
      return (movprfx_is_unpredicated ||
              ((movprfx_pg == GetRx<19, 16>()) && vform_matches)) &&
             zd_matches;

    case "flogb_z_p_z"_h:
      // FLOGB encodes its element size in a non-standard position
      // (lane-size field based at bit 17).
      return (movprfx_is_unpredicated ||
              ((movprfx_vform == GetSVEVectorFormat(17)) && pg_matches_low8)) &&
             zd_isnt_zn && zd_matches;

    // Predicated shifts by immediate: the element size is implied by the
    // tsize/imm3 encoding rather than the usual size field.
    case "asr_z_p_zi"_h:
    case "asrd_z_p_zi"_h:
    case "lsl_z_p_zi"_h:
    case "lsr_z_p_zi"_h:
    case "sqshl_z_p_zi"_h:
    case "sqshlu_z_p_zi"_h:
    case "srshr_z_p_zi"_h:
    case "uqshl_z_p_zi"_h:
    case "urshr_z_p_zi"_h:
      return (movprfx_is_unpredicated ||
              ((movprfx_vform ==
                SVEFormatFromLaneSizeInBytesLog2(
                    GetSVEImmShiftAndLaneSizeLog2(true).second)) &&
               pg_matches_low8)) &&
             zd_matches;

    // Conversions whose widest element is D-sized: a predicated prefix
    // must use the .d format.
    case "fcvt_z_p_z_d2h"_h:
    case "fcvt_z_p_z_d2s"_h:
    case "fcvt_z_p_z_h2d"_h:
    case "fcvt_z_p_z_s2d"_h:
    case "fcvtx_z_p_z_d2s"_h:
    case "fcvtzs_z_p_z_d2w"_h:
    case "fcvtzs_z_p_z_d2x"_h:
    case "fcvtzs_z_p_z_fp162x"_h:
    case "fcvtzs_z_p_z_s2x"_h:
    case "fcvtzu_z_p_z_d2w"_h:
    case "fcvtzu_z_p_z_d2x"_h:
    case "fcvtzu_z_p_z_fp162x"_h:
    case "fcvtzu_z_p_z_s2x"_h:
    case "scvtf_z_p_z_w2d"_h:
    case "scvtf_z_p_z_x2d"_h:
    case "scvtf_z_p_z_x2fp16"_h:
    case "scvtf_z_p_z_x2s"_h:
    case "ucvtf_z_p_z_w2d"_h:
    case "ucvtf_z_p_z_x2d"_h:
    case "ucvtf_z_p_z_x2fp16"_h:
    case "ucvtf_z_p_z_x2s"_h:
      return (movprfx_is_unpredicated ||
              ((movprfx_vform == kFormatVnD) && pg_matches_low8)) &&
             zd_isnt_zn && zd_matches;

    // Conversions whose widest element is H-sized.
    case "fcvtzs_z_p_z_fp162h"_h:
    case "fcvtzu_z_p_z_fp162h"_h:
    case "scvtf_z_p_z_h2fp16"_h:
    case "ucvtf_z_p_z_h2fp16"_h:
      return (movprfx_is_unpredicated ||
              ((movprfx_vform == kFormatVnH) && pg_matches_low8)) &&
             zd_isnt_zn && zd_matches;

    // Conversions whose widest element is S-sized.
    case "fcvt_z_p_z_h2s"_h:
    case "fcvt_z_p_z_s2h"_h:
    case "fcvtzs_z_p_z_fp162w"_h:
    case "fcvtzs_z_p_z_s2w"_h:
    case "fcvtzu_z_p_z_fp162w"_h:
    case "fcvtzu_z_p_z_s2w"_h:
    case "scvtf_z_p_z_w2fp16"_h:
    case "scvtf_z_p_z_w2s"_h:
    case "ucvtf_z_p_z_w2fp16"_h:
    case "ucvtf_z_p_z_w2s"_h:
      return (movprfx_is_unpredicated ||
              ((movprfx_vform == kFormatVnS) && pg_matches_low8)) &&
             zd_isnt_zn && zd_matches;

    // Predicated multiply-accumulates with two vector sources: neither
    // source may alias the destination.
    case "fcmla_z_p_zzz"_h:
    case "fmad_z_p_zzz"_h:
    case "fmla_z_p_zzz"_h:
    case "fmls_z_p_zzz"_h:
    case "fmsb_z_p_zzz"_h:
    case "fnmad_z_p_zzz"_h:
    case "fnmla_z_p_zzz"_h:
    case "fnmls_z_p_zzz"_h:
    case "fnmsb_z_p_zzz"_h:
    case "mad_z_p_zzz"_h:
    case "mla_z_p_zzz"_h:
    case "mls_z_p_zzz"_h:
    case "msb_z_p_zzz"_h:
      return (movprfx_is_unpredicated || (pg_matches_low8 && vform_matches)) &&
             zd_isnt_zm && zd_isnt_zn && zd_matches;

    // Predicated unary and binary destructive forms with one vector
    // source (Zn) to protect against aliasing.
    case "abs_z_p_z"_h:
    case "add_z_p_zz"_h:
    case "and_z_p_zz"_h:
    case "asr_z_p_zw"_h:
    case "asr_z_p_zz"_h:
    case "asrr_z_p_zz"_h:
    case "bic_z_p_zz"_h:
    case "cls_z_p_z"_h:
    case "clz_z_p_z"_h:
    case "cnot_z_p_z"_h:
    case "cnt_z_p_z"_h:
    case "cpy_z_p_v"_h:
    case "eor_z_p_zz"_h:
    case "fabd_z_p_zz"_h:
    case "fabs_z_p_z"_h:
    case "fadd_z_p_zz"_h:
    case "fcadd_z_p_zz"_h:
    case "fdiv_z_p_zz"_h:
    case "fdivr_z_p_zz"_h:
    case "fmax_z_p_zz"_h:
    case "fmaxnm_z_p_zz"_h:
    case "fmin_z_p_zz"_h:
    case "fminnm_z_p_zz"_h:
    case "fmul_z_p_zz"_h:
    case "fmulx_z_p_zz"_h:
    case "fneg_z_p_z"_h:
    case "frecpx_z_p_z"_h:
    case "frinta_z_p_z"_h:
    case "frinti_z_p_z"_h:
    case "frintm_z_p_z"_h:
    case "frintn_z_p_z"_h:
    case "frintp_z_p_z"_h:
    case "frintx_z_p_z"_h:
    case "frintz_z_p_z"_h:
    case "fscale_z_p_zz"_h:
    case "fsqrt_z_p_z"_h:
    case "fsub_z_p_zz"_h:
    case "fsubr_z_p_zz"_h:
    case "lsl_z_p_zw"_h:
    case "lsl_z_p_zz"_h:
    case "lslr_z_p_zz"_h:
    case "lsr_z_p_zw"_h:
    case "lsr_z_p_zz"_h:
    case "lsrr_z_p_zz"_h:
    case "mul_z_p_zz"_h:
    case "neg_z_p_z"_h:
    case "not_z_p_z"_h:
    case "orr_z_p_zz"_h:
    case "rbit_z_p_z"_h:
    case "revb_z_z"_h:
    case "revh_z_z"_h:
    case "revw_z_z"_h:
    case "sabd_z_p_zz"_h:
    case "sadalp_z_p_z"_h:
    case "sdiv_z_p_zz"_h:
    case "sdivr_z_p_zz"_h:
    case "shadd_z_p_zz"_h:
    case "shsub_z_p_zz"_h:
    case "shsubr_z_p_zz"_h:
    case "smax_z_p_zz"_h:
    case "smin_z_p_zz"_h:
    case "smulh_z_p_zz"_h:
    case "sqabs_z_p_z"_h:
    case "sqadd_z_p_zz"_h:
    case "sqneg_z_p_z"_h:
    case "sqrshl_z_p_zz"_h:
    case "sqrshlr_z_p_zz"_h:
    case "sqshl_z_p_zz"_h:
    case "sqshlr_z_p_zz"_h:
    case "sqsub_z_p_zz"_h:
    case "sqsubr_z_p_zz"_h:
    case "srhadd_z_p_zz"_h:
    case "srshl_z_p_zz"_h:
    case "srshlr_z_p_zz"_h:
    case "sub_z_p_zz"_h:
    case "subr_z_p_zz"_h:
    case "suqadd_z_p_zz"_h:
    case "sxtb_z_p_z"_h:
    case "sxth_z_p_z"_h:
    case "sxtw_z_p_z"_h:
    case "uabd_z_p_zz"_h:
    case "uadalp_z_p_z"_h:
    case "udiv_z_p_zz"_h:
    case "udivr_z_p_zz"_h:
    case "uhadd_z_p_zz"_h:
    case "uhsub_z_p_zz"_h:
    case "uhsubr_z_p_zz"_h:
    case "umax_z_p_zz"_h:
    case "umin_z_p_zz"_h:
    case "umulh_z_p_zz"_h:
    case "uqadd_z_p_zz"_h:
    case "uqrshl_z_p_zz"_h:
    case "uqrshlr_z_p_zz"_h:
    case "uqshl_z_p_zz"_h:
    case "uqshlr_z_p_zz"_h:
    case "uqsub_z_p_zz"_h:
    case "uqsubr_z_p_zz"_h:
    case "urecpe_z_p_z"_h:
    case "urhadd_z_p_zz"_h:
    case "urshl_z_p_zz"_h:
    case "urshlr_z_p_zz"_h:
    case "ursqrte_z_p_z"_h:
    case "usqadd_z_p_zz"_h:
    case "uxtb_z_p_z"_h:
    case "uxth_z_p_z"_h:
    case "uxtw_z_p_z"_h:
      return (movprfx_is_unpredicated || (pg_matches_low8 && vform_matches)) &&
             zd_isnt_zn && zd_matches;

    // Predicated forms with a scalar/immediate operand: no vector source
    // to alias.
    case "cpy_z_p_r"_h:
    case "fadd_z_p_zs"_h:
    case "fmax_z_p_zs"_h:
    case "fmaxnm_z_p_zs"_h:
    case "fmin_z_p_zs"_h:
    case "fminnm_z_p_zs"_h:
    case "fmul_z_p_zs"_h:
    case "fsub_z_p_zs"_h:
    case "fsubr_z_p_zs"_h:
      return (movprfx_is_unpredicated || (pg_matches_low8 && vform_matches)) &&
             zd_matches;
    default:
      // Any form not listed above cannot take a movprfx.
      return false;
  }
}  // NOLINT(readability/fn_size)
    483 
    484 bool Instruction::IsLoad() const {
    485  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    486    return false;
    487  }
    488 
    489  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    490    return Mask(LoadStorePairLBit) != 0;
    491  } else {
    492    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    493    switch (op) {
    494      case LDRB_w:
    495      case LDRH_w:
    496      case LDR_w:
    497      case LDR_x:
    498      case LDRSB_w:
    499      case LDRSB_x:
    500      case LDRSH_w:
    501      case LDRSH_x:
    502      case LDRSW_x:
    503      case LDR_b:
    504      case LDR_h:
    505      case LDR_s:
    506      case LDR_d:
    507      case LDR_q:
    508        return true;
    509      default:
    510        return false;
    511    }
    512  }
    513 }
    514 
    515 
    516 bool Instruction::IsStore() const {
    517  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    518    return false;
    519  }
    520 
    521  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    522    return Mask(LoadStorePairLBit) == 0;
    523  } else {
    524    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    525    switch (op) {
    526      case STRB_w:
    527      case STRH_w:
    528      case STR_w:
    529      case STR_x:
    530      case STR_b:
    531      case STR_h:
    532      case STR_s:
    533      case STR_d:
    534      case STR_q:
    535        return true;
    536      default:
    537        return false;
    538    }
    539  }
    540 }
    541 
    542 
    543 std::pair<int, int> Instruction::GetSVEPermuteIndexAndLaneSizeLog2() const {
    544  uint32_t imm_2 = ExtractBits<0x00C00000>();
    545  uint32_t tsz_5 = ExtractBits<0x001F0000>();
    546  uint32_t imm_7 = (imm_2 << 5) | tsz_5;
    547  int lane_size_in_byte_log_2 = std::min(CountTrailingZeros(tsz_5), 5);
    548  int index = ExtractUnsignedBitfield32(6, lane_size_in_byte_log_2 + 1, imm_7);
    549  return std::make_pair(index, lane_size_in_byte_log_2);
    550 }
    551 
    552 // Get the register and index for SVE indexed multiplies encoded in the forms:
    553 //  .h : Zm = <18:16>, index = <22><20:19>
    554 //  .s : Zm = <18:16>, index = <20:19>
    555 //  .d : Zm = <19:16>, index = <20>
    556 std::pair<int, int> Instruction::GetSVEMulZmAndIndex() const {
    557  int reg_code = GetRmLow16();
    558  int index = ExtractBits(20, 19);
    559 
    560  // For .h, index uses bit zero of the size field, so kFormatVnB below implies
    561  // half-word lane, with most-significant bit of the index zero.
    562  switch (GetSVEVectorFormat()) {
    563    case kFormatVnD:
    564      index >>= 1;  // Only bit 20 in the index for D lanes.
    565      break;
    566    case kFormatVnH:
    567      index += 4;  // Bit 22 is the top bit of index.
    568      VIXL_FALLTHROUGH();
    569    case kFormatVnB:
    570    case kFormatVnS:
    571      reg_code &= 7;  // Three bits used for the register.
    572      break;
    573    default:
    574      VIXL_UNIMPLEMENTED();
    575      break;
    576  }
    577  return std::make_pair(reg_code, index);
    578 }
    579 
    580 // Get the register and index for SVE indexed long multiplies encoded in the
    581 // forms:
    582 //  .h : Zm = <18:16>, index = <20:19><11>
    583 //  .s : Zm = <19:16>, index = <20><11>
    584 std::pair<int, int> Instruction::GetSVEMulLongZmAndIndex() const {
    585  int reg_code = GetRmLow16();
    586  int index = ExtractBit(11);
    587 
    588  // For long multiplies, the SVE size field <23:22> encodes the destination
    589  // element size. The source element size is half the width.
    590  switch (GetSVEVectorFormat()) {
    591    case kFormatVnS:
    592      reg_code &= 7;
    593      index |= ExtractBits(20, 19) << 1;
    594      break;
    595    case kFormatVnD:
    596      index |= ExtractBit(20) << 1;
    597      break;
    598    default:
    599      VIXL_UNIMPLEMENTED();
    600      break;
    601  }
    602  return std::make_pair(reg_code, index);
    603 }
    604 
    605 // Get the register and index for NEON indexed multiplies.
    606 std::pair<int, int> Instruction::GetNEONMulRmAndIndex() const {
    607  int reg_code = GetRm();
    608  int index = (GetNEONH() << 2) | (GetNEONL() << 1) | GetNEONM();
    609  switch (GetNEONSize()) {
    610    case 0:  // FP H-sized elements.
    611    case 1:  // Integer H-sized elements.
    612      // 4-bit Rm, 3-bit index.
    613      reg_code &= 0xf;
    614      break;
    615    case 2:  // S-sized elements.
    616      // 5-bit Rm, 2-bit index.
    617      index >>= 1;
    618      break;
    619    case 3:  // FP D-sized elements.
    620      // 5-bit Rm, 1-bit index.
    621      index >>= 2;
    622      break;
    623  }
    624  return std::make_pair(reg_code, index);
    625 }
    626 
    627 // Logical immediates can't encode zero, so a return value of zero is used to
    628 // indicate a failure case. Specifically, where the constraints on imm_s are
    629 // not met.
    630 uint64_t Instruction::GetImmLogical() const {
    631  unsigned reg_size = GetSixtyFourBits() ? kXRegSize : kWRegSize;
    632  int32_t n = GetBitN();
    633  int32_t imm_s = GetImmSetBits();
    634  int32_t imm_r = GetImmRotate();
    635  return DecodeImmBitMask(n, imm_s, imm_r, reg_size);
    636 }
    637 
    638 // Logical immediates can't encode zero, so a return value of zero is used to
    639 // indicate a failure case. Specifically, where the constraints on imm_s are
    640 // not met.
    641 uint64_t Instruction::GetSVEImmLogical() const {
    642  int n = GetSVEBitN();
    643  int imm_s = GetSVEImmSetBits();
    644  int imm_r = GetSVEImmRotate();
    645  int lane_size_in_bytes_log2 = GetSVEBitwiseImmLaneSizeInBytesLog2();
    646  switch (lane_size_in_bytes_log2) {
    647    case kDRegSizeInBytesLog2:
    648    case kSRegSizeInBytesLog2:
    649    case kHRegSizeInBytesLog2:
    650    case kBRegSizeInBytesLog2: {
    651      int lane_size_in_bits = 1 << (lane_size_in_bytes_log2 + 3);
    652      return DecodeImmBitMask(n, imm_s, imm_r, lane_size_in_bits);
    653    }
    654    default:
    655      return 0;
    656  }
    657 }
    658 
    659 std::pair<int, int> Instruction::GetSVEImmShiftAndLaneSizeLog2(
    660    bool is_predicated) const {
    661  Instr tsize =
    662      is_predicated ? ExtractBits<0x00C00300>() : ExtractBits<0x00D80000>();
    663  Instr imm_3 =
    664      is_predicated ? ExtractBits<0x000000E0>() : ExtractBits<0x00070000>();
    665  if (tsize == 0) {
    666    // The bit field `tsize` means undefined if it is zero, so return a
    667    // convenience value kWMinInt to indicate a failure case.
    668    return std::make_pair(kWMinInt, kWMinInt);
    669  }
    670 
    671  int lane_size_in_bytes_log_2 = 32 - CountLeadingZeros(tsize, 32) - 1;
    672  int esize = (1 << lane_size_in_bytes_log_2) * kBitsPerByte;
    673  int shift = (2 * esize) - ((tsize << 3) | imm_3);
    674  return std::make_pair(shift, lane_size_in_bytes_log_2);
    675 }
    676 
    677 int Instruction::GetSVEMsizeFromDtype(bool is_signed, int dtype_h_lsb) const {
    678  Instr dtype_h = ExtractBits(dtype_h_lsb + 1, dtype_h_lsb);
    679  if (is_signed) {
    680    dtype_h = dtype_h ^ 0x3;
    681  }
    682  return dtype_h;
    683 }
    684 
    685 int Instruction::GetSVEEsizeFromDtype(bool is_signed, int dtype_l_lsb) const {
    686  Instr dtype_l = ExtractBits(dtype_l_lsb + 1, dtype_l_lsb);
    687  if (is_signed) {
    688    dtype_l = dtype_l ^ 0x3;
    689  }
    690  return dtype_l;
    691 }
    692 
    693 int Instruction::GetSVEBitwiseImmLaneSizeInBytesLog2() const {
    694  int n = GetSVEBitN();
    695  int imm_s = GetSVEImmSetBits();
    696  unsigned type_bitset =
    697      (n << SVEImmSetBits_width) | (~imm_s & GetUintMask(SVEImmSetBits_width));
    698 
    699  // An lane size is constructed from the n and imm_s bits according to
    700  // the following table:
    701  //
    702  // N   imms   size
    703  // 0  0xxxxx   32
    704  // 0  10xxxx   16
    705  // 0  110xxx    8
    706  // 0  1110xx    8
    707  // 0  11110x    8
    708  // 1  xxxxxx   64
    709 
    710  if (type_bitset == 0) {
    711    // Bail out early since `HighestSetBitPosition` doesn't accept zero
    712    // value input.
    713    return -1;
    714  }
    715 
    716  switch (HighestSetBitPosition(type_bitset)) {
    717    case 6:
    718      return kDRegSizeInBytesLog2;
    719    case 5:
    720      return kSRegSizeInBytesLog2;
    721    case 4:
    722      return kHRegSizeInBytesLog2;
    723    case 3:
    724    case 2:
    725    case 1:
    726      return kBRegSizeInBytesLog2;
    727    default:
    728      // RESERVED encoding.
    729      return -1;
    730  }
    731 }
    732 
    733 int Instruction::GetSVEExtractImmediate() const {
    734  const int imm8h_mask = 0x001F0000;
    735  const int imm8l_mask = 0x00001C00;
    736  return ExtractBits<imm8h_mask | imm8l_mask>();
    737 }
    738 
// Expand a bitmask-immediate encoding (n, imm_s, imm_r) into the value it
// represents in a register of `size` bits. Returns 0 for RESERVED
// encodings; a valid bitmask immediate can never be zero, so callers use
// 0 as the failure sentinel.
uint64_t Instruction::DecodeImmBitMask(int32_t n,
                                       int32_t imm_s,
                                       int32_t imm_r,
                                       int32_t size) const {
  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //

  if (n == 1) {
    // 64-bit pattern: a single rotated run of (imm_s + 1) ones.
    if (imm_s == 0x3f) {
      // All s bits set would encode all-ones, which is RESERVED.
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1f) {
      // imm_s of the form 11111x is RESERVED.
      return 0;
    }
    // The position of the highest clear bit of imm_s selects the repeat
    // width (one of the 32/16/8/4/2-bit rows in the table above).
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          // All low s bits set is RESERVED for this width.
          return 0;
        }
        // Build the run of ones, rotate it within the lane, then tile it
        // across the whole register.
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}
    786 
    787 
    788 uint32_t Instruction::GetImmNEONabcdefgh() const {
    789  return GetImmNEONabc() << 5 | GetImmNEONdefgh();
    790 }
    791 
    792 
    793 Float16 Instruction::Imm8ToFloat16(uint32_t imm8) {
    794  // Imm8: abcdefgh (8 bits)
    795  // Half: aBbb.cdef.gh00.0000 (16 bits)
    796  // where B is b ^ 1
    797  uint32_t bits = imm8;
    798  uint16_t bit7 = (bits >> 7) & 0x1;
    799  uint16_t bit6 = (bits >> 6) & 0x1;
    800  uint16_t bit5_to_0 = bits & 0x3f;
    801  uint16_t result = (bit7 << 15) | ((4 - bit6) << 12) | (bit5_to_0 << 6);
    802  return RawbitsToFloat16(result);
    803 }
    804 
    805 
    806 float Instruction::Imm8ToFP32(uint32_t imm8) {
    807  // Imm8: abcdefgh (8 bits)
    808  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
    809  // where B is b ^ 1
    810  uint32_t bits = imm8;
    811  uint32_t bit7 = (bits >> 7) & 0x1;
    812  uint32_t bit6 = (bits >> 6) & 0x1;
    813  uint32_t bit5_to_0 = bits & 0x3f;
    814  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
    815 
    816  return RawbitsToFloat(result);
    817 }
    818 
    819 
// Decode this instruction's FP immediate field as a half-precision value.
Float16 Instruction::GetImmFP16() const { return Imm8ToFloat16(GetImmFP()); }
    821 
    822 
// Decode this instruction's FP immediate field as a single-precision value.
float Instruction::GetImmFP32() const { return Imm8ToFP32(GetImmFP()); }
    824 
    825 
    826 double Instruction::Imm8ToFP64(uint32_t imm8) {
    827  // Imm8: abcdefgh (8 bits)
    828  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
    829  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
    830  // where B is b ^ 1
    831  uint32_t bits = imm8;
    832  uint64_t bit7 = (bits >> 7) & 0x1;
    833  uint64_t bit6 = (bits >> 6) & 0x1;
    834  uint64_t bit5_to_0 = bits & 0x3f;
    835  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
    836 
    837  return RawbitsToDouble(result);
    838 }
    839 
    840 
// Decode this instruction's FP immediate field as a double-precision value.
double Instruction::GetImmFP64() const { return Imm8ToFP64(GetImmFP()); }
    842 
    843 
    844 Float16 Instruction::GetImmNEONFP16() const {
    845  return Imm8ToFloat16(GetImmNEONabcdefgh());
    846 }
    847 
    848 
    849 float Instruction::GetImmNEONFP32() const {
    850  return Imm8ToFP32(GetImmNEONabcdefgh());
    851 }
    852 
    853 
    854 double Instruction::GetImmNEONFP64() const {
    855  return Imm8ToFP64(GetImmNEONabcdefgh());
    856 }
    857 
    858 
    859 unsigned CalcLSPairDataSize(LoadStorePairOp op) {
    860  VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
    861  VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
    862  switch (op) {
    863    case STP_q:
    864    case LDP_q:
    865      return kQRegSizeInBytesLog2;
    866    case STP_x:
    867    case LDP_x:
    868    case STP_d:
    869    case LDP_d:
    870      return kXRegSizeInBytesLog2;
    871    default:
    872      return kWRegSizeInBytesLog2;
    873  }
    874 }
    875 
    876 
    877 int Instruction::GetImmBranchRangeBitwidth(ImmBranchType branch_type) {
    878  switch (branch_type) {
    879    case UncondBranchType:
    880      return ImmUncondBranch_width;
    881    case CondBranchType:
    882      return ImmCondBranch_width;
    883    case CompareBranchType:
    884      return ImmCmpBranch_width;
    885    case TestBranchType:
    886      return ImmTestBranch_width;
    887    default:
    888      VIXL_UNREACHABLE();
    889      return 0;
    890  }
    891 }
    892 
    893 
    894 int32_t Instruction::GetImmBranchForwardRange(ImmBranchType branch_type) {
    895  int32_t encoded_max = 1 << (GetImmBranchRangeBitwidth(branch_type) - 1);
    896  return encoded_max * kInstructionSize;
    897 }
    898 
    899 
    900 bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
    901                                     int64_t offset) {
    902  return IsIntN(GetImmBranchRangeBitwidth(branch_type), offset);
    903 }
    904 
    905 ImmBranchRangeType Instruction::ImmBranchTypeToRange(ImmBranchType branch_type)
    906 {
    907  switch (branch_type) {
    908    case UncondBranchType:
    909      return UncondBranchRangeType;
    910    case CondBranchType:
    911    case CompareBranchType:
    912      return CondBranchRangeType;
    913    case TestBranchType:
    914      return TestBranchRangeType;
    915    default:
    916      return UnknownBranchRangeType;
    917  }
    918 }
    919 
    920 int32_t Instruction::ImmBranchMaxForwardOffset(ImmBranchRangeType range_type)
    921 {
    922  // Branches encode a pc-relative two's complement number of 32-bit
    923  // instructions. Compute the number of bytes corresponding to the largest
    924  // positive number of instructions that can be encoded.
    925  switch(range_type) {
    926    case TestBranchRangeType:
    927      return ((1 << ImmTestBranch_width) - 1) / 2 * kInstructionSize;
    928    case CondBranchRangeType:
    929      return ((1 << ImmCondBranch_width) - 1) / 2 * kInstructionSize;
    930    case UncondBranchRangeType:
    931      return ((1 << ImmUncondBranch_width) - 1) / 2 * kInstructionSize;
    932    default:
    933      VIXL_UNREACHABLE();
    934      return 0;
    935  }
    936 }
    937 
    938 int32_t Instruction::ImmBranchMinBackwardOffset(ImmBranchRangeType range_type)
    939 {
    940  switch(range_type) {
    941    case TestBranchRangeType:
    942      return -int32_t(1 << ImmTestBranch_width) / int32_t(2 * kInstructionSize);
    943    case CondBranchRangeType:
    944      return -int32_t(1 << ImmCondBranch_width) / int32_t(2 * kInstructionSize);
    945    case UncondBranchRangeType:
    946      return -int32_t(1 << ImmUncondBranch_width) / int32_t(2 * kInstructionSize);
    947    default:
    948      VIXL_UNREACHABLE();
    949      return 0;
    950  }
    951 }
    952 
// Compute the absolute target address of this PC-relative instruction
// (ADR/ADRP or any immediate branch), returned as an Instruction pointer.
const Instruction* Instruction::GetImmPCOffsetTarget() const {
  const Instruction* base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = GetImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      // ADRP is relative to the start of this instruction's page, and its
      // immediate counts pages rather than bytes.
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(GetBranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = GetImmBranch() * static_cast<int>(kInstructionSize);
  }
  return base + offset;
}
    973 
    974 
    975 int Instruction::GetImmBranch() const {
    976  switch (GetBranchType()) {
    977    case CondBranchType:
    978      return GetImmCondBranch();
    979    case UncondBranchType:
    980      return GetImmUncondBranch();
    981    case CompareBranchType:
    982      return GetImmCmpBranch();
    983    case TestBranchType:
    984      return GetImmTestBranch();
    985    default:
    986      VIXL_UNREACHABLE();
    987  }
    988  return 0;
    989 }
    990 
    991 
    992 void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
    993  if (IsPCRelAddressing()) {
    994    SetPCRelImmTarget(target);
    995  } else {
    996    SetBranchImmTarget(target);
    997  }
    998 }
    999 
   1000 
// Rewrite the 21-bit immediate of an ADR/ADRP so it addresses `target`.
void Instruction::SetPCRelImmTarget(const Instruction* target) {
  ptrdiff_t imm21;
  if ((Mask(PCRelAddressingMask) == ADR)) {
    // ADR encodes a byte offset from this instruction.
    imm21 = target - this;
  } else {
    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
    // ADRP encodes the delta between the target's and this instruction's
    // kPageSize-aligned pages.
    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
    imm21 = target_page - this_page;
  }
  Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));

  // Splice the new immediate into the existing instruction bits.
  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}
   1015 
   1016 
   1017 void Instruction::SetBranchImmTarget(const Instruction* target) {
   1018  VIXL_ASSERT(((target - this) & 3) == 0);
   1019  Instr branch_imm = 0;
   1020  uint32_t imm_mask = 0;
   1021  int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
   1022  switch (GetBranchType()) {
   1023    case CondBranchType: {
   1024      branch_imm = Assembler::ImmCondBranch(offset);
   1025      imm_mask = ImmCondBranch_mask;
   1026      break;
   1027    }
   1028    case UncondBranchType: {
   1029      branch_imm = Assembler::ImmUncondBranch(offset);
   1030      imm_mask = ImmUncondBranch_mask;
   1031      break;
   1032    }
   1033    case CompareBranchType: {
   1034      branch_imm = Assembler::ImmCmpBranch(offset);
   1035      imm_mask = ImmCmpBranch_mask;
   1036      break;
   1037    }
   1038    case TestBranchType: {
   1039      branch_imm = Assembler::ImmTestBranch(offset);
   1040      imm_mask = ImmTestBranch_mask;
   1041      break;
   1042    }
   1043    default:
   1044      VIXL_UNREACHABLE();
   1045  }
   1046  SetInstructionBits(Mask(~imm_mask) | branch_imm);
   1047 }
   1048 
   1049 
   1050 void Instruction::SetImmLLiteral(const Instruction* source) {
   1051  VIXL_ASSERT(IsWordAligned(source));
   1052  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
   1053  Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
   1054  Instr mask = ImmLLiteral_mask;
   1055 
   1056  SetInstructionBits(Mask(~mask) | imm);
   1057 }
   1058 
   1059 
   1060 VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
   1061  switch (vform) {
   1062    case kFormat8H:
   1063      return kFormat8B;
   1064    case kFormat4S:
   1065      return kFormat4H;
   1066    case kFormat2D:
   1067      return kFormat2S;
   1068    case kFormat1Q:
   1069      return kFormat1D;
   1070    case kFormatH:
   1071      return kFormatB;
   1072    case kFormatS:
   1073      return kFormatH;
   1074    case kFormatD:
   1075      return kFormatS;
   1076    case kFormatVnH:
   1077      return kFormatVnB;
   1078    case kFormatVnS:
   1079      return kFormatVnH;
   1080    case kFormatVnD:
   1081      return kFormatVnS;
   1082    case kFormatVnQ:
   1083      return kFormatVnD;
   1084    default:
   1085      VIXL_UNREACHABLE();
   1086      return kFormatUndefined;
   1087  }
   1088 }
   1089 
   1090 
   1091 VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
   1092  switch (vform) {
   1093    case kFormat8B:
   1094      return kFormat8H;
   1095    case kFormat4H:
   1096      return kFormat4S;
   1097    case kFormat2S:
   1098      return kFormat2D;
   1099    case kFormatB:
   1100      return kFormatH;
   1101    case kFormatH:
   1102      return kFormatS;
   1103    case kFormatS:
   1104      return kFormatD;
   1105    case kFormatVnB:
   1106      return kFormatVnH;
   1107    case kFormatVnH:
   1108      return kFormatVnS;
   1109    case kFormatVnS:
   1110      return kFormatVnD;
   1111    default:
   1112      VIXL_UNREACHABLE();
   1113      return kFormatUndefined;
   1114  }
   1115 }
   1116 
   1117 
   1118 VectorFormat VectorFormatFillQ(VectorFormat vform) {
   1119  switch (vform) {
   1120    case kFormatB:
   1121    case kFormat8B:
   1122    case kFormat16B:
   1123      return kFormat16B;
   1124    case kFormatH:
   1125    case kFormat4H:
   1126    case kFormat8H:
   1127      return kFormat8H;
   1128    case kFormatS:
   1129    case kFormat2S:
   1130    case kFormat4S:
   1131      return kFormat4S;
   1132    case kFormatD:
   1133    case kFormat1D:
   1134    case kFormat2D:
   1135      return kFormat2D;
   1136    default:
   1137      VIXL_UNREACHABLE();
   1138      return kFormatUndefined;
   1139  }
   1140 }
   1141 
   1142 VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
   1143  switch (vform) {
   1144    case kFormat4H:
   1145      return kFormat8B;
   1146    case kFormat8H:
   1147      return kFormat16B;
   1148    case kFormat2S:
   1149      return kFormat4H;
   1150    case kFormat4S:
   1151      return kFormat8H;
   1152    case kFormat1D:
   1153      return kFormat2S;
   1154    case kFormat2D:
   1155      return kFormat4S;
   1156    case kFormat1Q:
   1157      return kFormat2D;
   1158    case kFormatVnH:
   1159      return kFormatVnB;
   1160    case kFormatVnS:
   1161      return kFormatVnH;
   1162    case kFormatVnD:
   1163      return kFormatVnS;
   1164    default:
   1165      VIXL_UNREACHABLE();
   1166      return kFormatUndefined;
   1167  }
   1168 }
   1169 
   1170 VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
   1171  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
   1172  switch (vform) {
   1173    case kFormat8B:
   1174      return kFormat16B;
   1175    case kFormat4H:
   1176      return kFormat8H;
   1177    case kFormat2S:
   1178      return kFormat4S;
   1179    default:
   1180      VIXL_UNREACHABLE();
   1181      return kFormatUndefined;
   1182  }
   1183 }
   1184 
   1185 
   1186 VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
   1187  VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
   1188  switch (vform) {
   1189    case kFormat16B:
   1190      return kFormat8B;
   1191    case kFormat8H:
   1192      return kFormat4H;
   1193    case kFormat4S:
   1194      return kFormat2S;
   1195    default:
   1196      VIXL_UNREACHABLE();
   1197      return kFormatUndefined;
   1198  }
   1199 }
   1200 
   1201 
   1202 VectorFormat ScalarFormatFromLaneSize(int lane_size_in_bits) {
   1203  switch (lane_size_in_bits) {
   1204    case 8:
   1205      return kFormatB;
   1206    case 16:
   1207      return kFormatH;
   1208    case 32:
   1209      return kFormatS;
   1210    case 64:
   1211      return kFormatD;
   1212    default:
   1213      VIXL_UNREACHABLE();
   1214      return kFormatUndefined;
   1215  }
   1216 }
   1217 
   1218 
   1219 bool IsSVEFormat(VectorFormat vform) {
   1220  switch (vform) {
   1221    case kFormatVnB:
   1222    case kFormatVnH:
   1223    case kFormatVnS:
   1224    case kFormatVnD:
   1225    case kFormatVnQ:
   1226    case kFormatVnO:
   1227      return true;
   1228    default:
   1229      return false;
   1230  }
   1231 }
   1232 
   1233 
   1234 VectorFormat SVEFormatFromLaneSizeInBytes(int lane_size_in_bytes) {
   1235  switch (lane_size_in_bytes) {
   1236    case 1:
   1237      return kFormatVnB;
   1238    case 2:
   1239      return kFormatVnH;
   1240    case 4:
   1241      return kFormatVnS;
   1242    case 8:
   1243      return kFormatVnD;
   1244    case 16:
   1245      return kFormatVnQ;
   1246    default:
   1247      VIXL_UNREACHABLE();
   1248      return kFormatUndefined;
   1249  }
   1250 }
   1251 
   1252 
   1253 VectorFormat SVEFormatFromLaneSizeInBits(int lane_size_in_bits) {
   1254  switch (lane_size_in_bits) {
   1255    case 8:
   1256    case 16:
   1257    case 32:
   1258    case 64:
   1259    case 128:
   1260      return SVEFormatFromLaneSizeInBytes(lane_size_in_bits / kBitsPerByte);
   1261    default:
   1262      VIXL_UNREACHABLE();
   1263      return kFormatUndefined;
   1264  }
   1265 }
   1266 
   1267 
   1268 VectorFormat SVEFormatFromLaneSizeInBytesLog2(int lane_size_in_bytes_log2) {
   1269  switch (lane_size_in_bytes_log2) {
   1270    case 0:
   1271    case 1:
   1272    case 2:
   1273    case 3:
   1274    case 4:
   1275      return SVEFormatFromLaneSizeInBytes(1 << lane_size_in_bytes_log2);
   1276    default:
   1277      VIXL_UNREACHABLE();
   1278      return kFormatUndefined;
   1279  }
   1280 }
   1281 
   1282 
   1283 VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
   1284  return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
   1285 }
   1286 
   1287 
   1288 unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
   1289  VIXL_ASSERT(vform != kFormatUndefined);
   1290  VIXL_ASSERT(!IsSVEFormat(vform));
   1291  switch (vform) {
   1292    case kFormatB:
   1293      return kBRegSize;
   1294    case kFormatH:
   1295      return kHRegSize;
   1296    case kFormatS:
   1297    case kFormat2H:
   1298      return kSRegSize;
   1299    case kFormatD:
   1300    case kFormat8B:
   1301    case kFormat4H:
   1302    case kFormat2S:
   1303    case kFormat1D:
   1304      return kDRegSize;
   1305    case kFormat16B:
   1306    case kFormat8H:
   1307    case kFormat4S:
   1308    case kFormat2D:
   1309    case kFormat1Q:
   1310      return kQRegSize;
   1311    default:
   1312      VIXL_UNREACHABLE();
   1313      return 0;
   1314  }
   1315 }
   1316 
   1317 
   1318 unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
   1319  return RegisterSizeInBitsFromFormat(vform) / 8;
   1320 }
   1321 
   1322 
   1323 unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
   1324  VIXL_ASSERT(vform != kFormatUndefined);
   1325  switch (vform) {
   1326    case kFormatB:
   1327    case kFormat8B:
   1328    case kFormat16B:
   1329    case kFormatVnB:
   1330      return 8;
   1331    case kFormatH:
   1332    case kFormat2H:
   1333    case kFormat4H:
   1334    case kFormat8H:
   1335    case kFormatVnH:
   1336      return 16;
   1337    case kFormatS:
   1338    case kFormat2S:
   1339    case kFormat4S:
   1340    case kFormatVnS:
   1341      return 32;
   1342    case kFormatD:
   1343    case kFormat1D:
   1344    case kFormat2D:
   1345    case kFormatVnD:
   1346      return 64;
   1347    case kFormat1Q:
   1348    case kFormatVnQ:
   1349      return 128;
   1350    case kFormatVnO:
   1351      return 256;
   1352    default:
   1353      VIXL_UNREACHABLE();
   1354      return 0;
   1355  }
   1356 }
   1357 
   1358 
   1359 int LaneSizeInBytesFromFormat(VectorFormat vform) {
   1360  return LaneSizeInBitsFromFormat(vform) / 8;
   1361 }
   1362 
   1363 
   1364 int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
   1365  VIXL_ASSERT(vform != kFormatUndefined);
   1366  switch (vform) {
   1367    case kFormatB:
   1368    case kFormat8B:
   1369    case kFormat16B:
   1370    case kFormatVnB:
   1371      return 0;
   1372    case kFormatH:
   1373    case kFormat2H:
   1374    case kFormat4H:
   1375    case kFormat8H:
   1376    case kFormatVnH:
   1377      return 1;
   1378    case kFormatS:
   1379    case kFormat2S:
   1380    case kFormat4S:
   1381    case kFormatVnS:
   1382      return 2;
   1383    case kFormatD:
   1384    case kFormat1D:
   1385    case kFormat2D:
   1386    case kFormatVnD:
   1387      return 3;
   1388    case kFormatVnQ:
   1389      return 4;
   1390    default:
   1391      VIXL_UNREACHABLE();
   1392      return 0;
   1393  }
   1394 }
   1395 
   1396 
   1397 int LaneCountFromFormat(VectorFormat vform) {
   1398  VIXL_ASSERT(vform != kFormatUndefined);
   1399  switch (vform) {
   1400    case kFormat16B:
   1401      return 16;
   1402    case kFormat8B:
   1403    case kFormat8H:
   1404      return 8;
   1405    case kFormat4H:
   1406    case kFormat4S:
   1407      return 4;
   1408    case kFormat2H:
   1409    case kFormat2S:
   1410    case kFormat2D:
   1411      return 2;
   1412    case kFormat1D:
   1413    case kFormat1Q:
   1414    case kFormatB:
   1415    case kFormatH:
   1416    case kFormatS:
   1417    case kFormatD:
   1418      return 1;
   1419    default:
   1420      VIXL_UNREACHABLE();
   1421      return 0;
   1422  }
   1423 }
   1424 
   1425 
   1426 int MaxLaneCountFromFormat(VectorFormat vform) {
   1427  VIXL_ASSERT(vform != kFormatUndefined);
   1428  switch (vform) {
   1429    case kFormatB:
   1430    case kFormat8B:
   1431    case kFormat16B:
   1432      return 16;
   1433    case kFormatH:
   1434    case kFormat4H:
   1435    case kFormat8H:
   1436      return 8;
   1437    case kFormatS:
   1438    case kFormat2S:
   1439    case kFormat4S:
   1440      return 4;
   1441    case kFormatD:
   1442    case kFormat1D:
   1443    case kFormat2D:
   1444      return 2;
   1445    default:
   1446      VIXL_UNREACHABLE();
   1447      return 0;
   1448  }
   1449 }
   1450 
   1451 
   1452 // Does 'vform' indicate a vector format or a scalar format?
   1453 bool IsVectorFormat(VectorFormat vform) {
   1454  VIXL_ASSERT(vform != kFormatUndefined);
   1455  switch (vform) {
   1456    case kFormatB:
   1457    case kFormatH:
   1458    case kFormatS:
   1459    case kFormatD:
   1460      return false;
   1461    default:
   1462      return true;
   1463  }
   1464 }
   1465 
   1466 
   1467 int64_t MaxIntFromFormat(VectorFormat vform) {
   1468  int lane_size = LaneSizeInBitsFromFormat(vform);
   1469  return static_cast<int64_t>(GetUintMask(lane_size) >> 1);
   1470 }
   1471 
   1472 
   1473 int64_t MinIntFromFormat(VectorFormat vform) {
   1474  return -MaxIntFromFormat(vform) - 1;
   1475 }
   1476 
   1477 
   1478 uint64_t MaxUintFromFormat(VectorFormat vform) {
   1479  return GetUintMask(LaneSizeInBitsFromFormat(vform));
   1480 }
   1481 }  // namespace vixl