tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

warp_plane_neon_i8mm.c (12808B)


/*
 * Copyright (c) 2023, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "warp_plane_neon.h"

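// Each 16-byte row of this table is a set of vqtbl1q_u8 indices selecting
// four overlapping 4-byte windows of the source row (windows starting at
// bytes 0-3, 4-7 and 8-11 respectively), so that each 32-bit lane of a USDOT
// dot product sees four consecutive input samples.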
DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
  0, 1, 2,  3,  1, 2,  3,  4,  2,  3,  4,  5,  3,  4,  5,  6,
  4, 5, 6,  7,  5, 6,  7,  8,  6,  7,  8,  9,  7,  8,  9,  10,
  8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};

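// Filter one row of four output pixels. Four distinct 8-tap filters, one per
// output pixel, are loaded at consecutive filter phases stepped by alpha.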
static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
                                                           int sx, int alpha) {
  // Only put the constant in every other lane to avoid double-counting when
  // performing the pairwise add later.
  const int32x4_t add_const =
      vreinterpretq_s32_u64(vdupq_n_u64(1 << (8 + FILTER_BITS - 1)));

  // Load the four 8-tap filters.
  int16x8_t f[4];
  load_filters_4(f, sx, alpha);

  // Narrow the 16-bit filter taps to 8 bits for USDOT; the warp filter taps
  // fit in int8.
  int8x16_t f01_u8 = vcombine_s8(vmovn_s16(f[0]), vmovn_s16(f[1]));
  int8x16_t f23_u8 = vcombine_s8(vmovn_s16(f[2]), vmovn_s16(f[3]));

  // Build the four 8-sample input windows, one per output pixel.
  uint8x8_t in0 = vget_low_u8(in);
  uint8x8_t in1 = vget_low_u8(vextq_u8(in, in, 1));
  uint8x8_t in2 = vget_low_u8(vextq_u8(in, in, 2));
  uint8x8_t in3 = vget_low_u8(vextq_u8(in, in, 3));

  // Each USDOT computes two 8-tap convolutions, each left as a pair of
  // adjacent 4-tap partial sums.
  int32x4_t m01 = vusdotq_s32(add_const, vcombine_u8(in0, in1), f01_u8);
  int32x4_t m23 = vusdotq_s32(add_const, vcombine_u8(in2, in3), f23_u8);

  // The pairwise add folds the partial sums into the four 8-tap results.
  int32x4_t m0123 = vpaddq_s32(m01, m23);

  uint16x8_t res =
      vcombine_u16(vqrshrun_n_s32(m0123, ROUND0_BITS), vdup_n_u16(0));
  return vreinterpretq_s16_u16(res);
}

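// As above, but filter one row of eight output pixels with eight distinct
// filters at consecutive phases stepped by alpha.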
static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
                                                           int sx, int alpha) {
  // Only put the constant in every other lane to avoid double-counting when
  // performing the pairwise add later.
  const int32x4_t add_const =
      vreinterpretq_s32_u64(vdupq_n_u64(1 << (8 + FILTER_BITS - 1)));

  // Load the eight 8-tap filters.
  int16x8_t f[8];
  load_filters_8(f, sx, alpha);

  int8x16_t f01_u8 = vcombine_s8(vmovn_s16(f[0]), vmovn_s16(f[1]));
  int8x16_t f23_u8 = vcombine_s8(vmovn_s16(f[2]), vmovn_s16(f[3]));
  int8x16_t f45_u8 = vcombine_s8(vmovn_s16(f[4]), vmovn_s16(f[5]));
  int8x16_t f67_u8 = vcombine_s8(vmovn_s16(f[6]), vmovn_s16(f[7]));

  uint8x8_t in0 = vget_low_u8(in);
  uint8x8_t in1 = vget_low_u8(vextq_u8(in, in, 1));
  uint8x8_t in2 = vget_low_u8(vextq_u8(in, in, 2));
  uint8x8_t in3 = vget_low_u8(vextq_u8(in, in, 3));
  uint8x8_t in4 = vget_low_u8(vextq_u8(in, in, 4));
  uint8x8_t in5 = vget_low_u8(vextq_u8(in, in, 5));
  uint8x8_t in6 = vget_low_u8(vextq_u8(in, in, 6));
  uint8x8_t in7 = vget_low_u8(vextq_u8(in, in, 7));

  int32x4_t m01 = vusdotq_s32(add_const, vcombine_u8(in0, in1), f01_u8);
  int32x4_t m23 = vusdotq_s32(add_const, vcombine_u8(in2, in3), f23_u8);
  int32x4_t m45 = vusdotq_s32(add_const, vcombine_u8(in4, in5), f45_u8);
  int32x4_t m67 = vusdotq_s32(add_const, vcombine_u8(in6, in7), f67_u8);

  int32x4_t m0123 = vpaddq_s32(m01, m23);
  int32x4_t m4567 = vpaddq_s32(m45, m67);

  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(m0123, ROUND0_BITS),
                                vqrshrun_n_s32(m4567, ROUND0_BITS));
  return vreinterpretq_s16_u16(res);
}

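// Single-filter (alpha == 0) path: all output pixels in the row share one
// filter, so the caller passes the taps directly (hoisted out of the row loop
// when beta == 0, hence the name) and the input windows are generated with a
// table lookup instead of reloading filters.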
static AOM_FORCE_INLINE int16x8_t
horizontal_filter_4x1_f1_beta0(const uint8x16_t in, int16x8_t f_s16) {
  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));

  int8x16_t f_s8 = vcombine_s8(vmovn_s16(f_s16), vmovn_s16(f_s16));

  uint8x16_t perm0 = vld1q_u8(&usdot_permute_idx[0]);
  uint8x16_t perm1 = vld1q_u8(&usdot_permute_idx[16]);

  // Permute samples ready for dot product.
  // { 0,  1,  2,  3,  1,  2,  3,  4,  2,  3,  4,  5,  3,  4,  5,  6 }
  // { 4,  5,  6,  7,  5,  6,  7,  8,  6,  7,  8,  9,  7,  8,  9, 10 }
  uint8x16_t in_0123 = vqtbl1q_u8(in, perm0);
  uint8x16_t in_4567 = vqtbl1q_u8(in, perm1);

  // vusdotq_laneq_s32 selects four taps from f_s8 per call: lane 0 applies
  // taps 0-3 to the low halves of the windows, lane 1 applies taps 4-7 to the
  // high halves.
  int32x4_t m0123 = vusdotq_laneq_s32(add_const, in_0123, f_s8, 0);
  m0123 = vusdotq_laneq_s32(m0123, in_4567, f_s8, 1);

  uint16x8_t res =
      vcombine_u16(vqrshrun_n_s32(m0123, ROUND0_BITS), vdup_n_u16(0));
  return vreinterpretq_s16_u16(res);
}

static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
                                                           int sx) {
  int16x8_t f_s16 = vld1q_s16(av1_warped_filter[sx >> WARPEDDIFF_PREC_BITS]);
  return horizontal_filter_4x1_f1_beta0(in, f_s16);
}

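// Eight-pixel variant of the single-filter path: a third permute supplies the
// input windows starting at bytes 8-11, needed by output pixels 4-7.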
static AOM_FORCE_INLINE int16x8_t
horizontal_filter_8x1_f1_beta0(const uint8x16_t in, int16x8_t f_s16) {
  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));

  int8x16_t f_s8 = vcombine_s8(vmovn_s16(f_s16), vmovn_s16(f_s16));

  uint8x16_t perm0 = vld1q_u8(&usdot_permute_idx[0]);
  uint8x16_t perm1 = vld1q_u8(&usdot_permute_idx[16]);
  uint8x16_t perm2 = vld1q_u8(&usdot_permute_idx[32]);

  // Permute samples ready for dot product.
  // { 0,  1,  2,  3,  1,  2,  3,  4,  2,  3,  4,  5,  3,  4,  5,  6 }
  // { 4,  5,  6,  7,  5,  6,  7,  8,  6,  7,  8,  9,  7,  8,  9, 10 }
  // { 8,  9, 10, 11,  9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }
  uint8x16_t in_0123 = vqtbl1q_u8(in, perm0);
  uint8x16_t in_4567 = vqtbl1q_u8(in, perm1);
  uint8x16_t in_89ab = vqtbl1q_u8(in, perm2);

  int32x4_t m0123 = vusdotq_laneq_s32(add_const, in_0123, f_s8, 0);
  m0123 = vusdotq_laneq_s32(m0123, in_4567, f_s8, 1);

  int32x4_t m4567 = vusdotq_laneq_s32(add_const, in_4567, f_s8, 0);
  m4567 = vusdotq_laneq_s32(m4567, in_89ab, f_s8, 1);

  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(m0123, ROUND0_BITS),
                                vqrshrun_n_s32(m4567, ROUND0_BITS));
  return vreinterpretq_s16_u16(res);
}

static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
                                                           int sx) {
  int16x8_t f_s16 = vld1q_s16(av1_warped_filter[sx >> WARPEDDIFF_PREC_BITS]);
  return horizontal_filter_8x1_f1_beta0(in, f_s16);
}

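// Vertical pass, single-filter case (gamma == 0), block four pixels wide: one
// 8-tap filter is applied down the eight rows of horizontal results using
// widening multiply-accumulates, with the taps selected by lane.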
static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
                                                    int32x4_t *res, int sy) {
  int16x4_t s0 = vget_low_s16(src[0]);
  int16x4_t s1 = vget_low_s16(src[1]);
  int16x4_t s2 = vget_low_s16(src[2]);
  int16x4_t s3 = vget_low_s16(src[3]);
  int16x4_t s4 = vget_low_s16(src[4]);
  int16x4_t s5 = vget_low_s16(src[5]);
  int16x4_t s6 = vget_low_s16(src[6]);
  int16x4_t s7 = vget_low_s16(src[7]);

  int16x8_t f = vld1q_s16(av1_warped_filter[sy >> WARPEDDIFF_PREC_BITS]);

  int32x4_t m0123 = vmull_lane_s16(s0, vget_low_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, s1, vget_low_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, s2, vget_low_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, s3, vget_low_s16(f), 3);
  m0123 = vmlal_lane_s16(m0123, s4, vget_high_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, s5, vget_high_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, s6, vget_high_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, s7, vget_high_s16(f), 3);

  *res = m0123;
}

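// Vertical pass with four distinct filters (general gamma), four pixels wide.
// Transposing the 8x4 block of horizontal results makes each column's eight
// samples contiguous in a register, so every output reduces to a dot product
// against its own filter followed by a 4-way horizontal add.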
static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
                                                    int32x4_t *res, int sy,
                                                    int gamma) {
  int16x8_t s0, s1, s2, s3;
  transpose_elems_s16_4x8(
      vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
      vget_low_s16(src[3]), vget_low_s16(src[4]), vget_low_s16(src[5]),
      vget_low_s16(src[6]), vget_low_s16(src[7]), &s0, &s1, &s2, &s3);

  int16x8_t f[4];
  load_filters_4(f, sy, gamma);

  int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0]));
  m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0]));
  int32x4_t m1 = vmull_s16(vget_low_s16(s1), vget_low_s16(f[1]));
  m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1]));
  int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2]));
  m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2]));
  int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3]));
  m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3]));

  int32x4_t m0123_pairs[] = { m0, m1, m2, m3 };

  *res = horizontal_add_4d_s32x4(m0123_pairs);
}

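// Vertical pass, single-filter case, eight pixels wide: as the 4-wide version
// but the low and high halves of each row are accumulated separately.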
static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
                                                    int32x4_t *res_low,
                                                    int32x4_t *res_high,
                                                    int sy) {
  int16x8_t s0 = src[0];
  int16x8_t s1 = src[1];
  int16x8_t s2 = src[2];
  int16x8_t s3 = src[3];
  int16x8_t s4 = src[4];
  int16x8_t s5 = src[5];
  int16x8_t s6 = src[6];
  int16x8_t s7 = src[7];

  int16x8_t f = vld1q_s16(av1_warped_filter[sy >> WARPEDDIFF_PREC_BITS]);

  int32x4_t m0123 = vmull_lane_s16(vget_low_s16(s0), vget_low_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s1), vget_low_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s2), vget_low_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s3), vget_low_s16(f), 3);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s4), vget_high_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s5), vget_high_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s6), vget_high_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s7), vget_high_s16(f), 3);

  int32x4_t m4567 = vmull_lane_s16(vget_high_s16(s0), vget_low_s16(f), 0);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s1), vget_low_s16(f), 1);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s2), vget_low_s16(f), 2);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s3), vget_low_s16(f), 3);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s4), vget_high_s16(f), 0);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s5), vget_high_s16(f), 1);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s6), vget_high_s16(f), 2);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s7), vget_high_s16(f), 3);

  *res_low = m0123;
  *res_high = m4567;
}

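// Vertical pass with eight distinct filters (general gamma), eight pixels
// wide: transpose the 8x8 block in place, then one dot product per output
// column, reduced with 4-way horizontal adds.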
static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
                                                    int32x4_t *res_low,
                                                    int32x4_t *res_high, int sy,
                                                    int gamma) {
  int16x8_t s0 = src[0];
  int16x8_t s1 = src[1];
  int16x8_t s2 = src[2];
  int16x8_t s3 = src[3];
  int16x8_t s4 = src[4];
  int16x8_t s5 = src[5];
  int16x8_t s6 = src[6];
  int16x8_t s7 = src[7];
  transpose_elems_inplace_s16_8x8(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);

  int16x8_t f[8];
  load_filters_8(f, sy, gamma);

  int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0]));
  m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0]));
  int32x4_t m1 = vmull_s16(vget_low_s16(s1), vget_low_s16(f[1]));
  m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1]));
  int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2]));
  m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2]));
  int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3]));
  m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3]));
  int32x4_t m4 = vmull_s16(vget_low_s16(s4), vget_low_s16(f[4]));
  m4 = vmlal_s16(m4, vget_high_s16(s4), vget_high_s16(f[4]));
  int32x4_t m5 = vmull_s16(vget_low_s16(s5), vget_low_s16(f[5]));
  m5 = vmlal_s16(m5, vget_high_s16(s5), vget_high_s16(f[5]));
  int32x4_t m6 = vmull_s16(vget_low_s16(s6), vget_low_s16(f[6]));
  m6 = vmlal_s16(m6, vget_high_s16(s6), vget_high_s16(f[6]));
  int32x4_t m7 = vmull_s16(vget_low_s16(s7), vget_low_s16(f[7]));
  m7 = vmlal_s16(m7, vget_high_s16(s7), vget_high_s16(f[7]));

  int32x4_t m0123_pairs[] = { m0, m1, m2, m3 };
  int32x4_t m4567_pairs[] = { m4, m5, m6, m7 };

  *res_low = horizontal_add_4d_s32x4(m0123_pairs);
  *res_high = horizontal_add_4d_s32x4(m4567_pairs);
}

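// Entry point. The i8mm-specific work lives in the helpers above;
// av1_warp_affine_common (defined in warp_plane_neon.h) provides the shared
// outer loops and dispatch logic that invoke them.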
void av1_warp_affine_neon_i8mm(const int32_t *mat, const uint8_t *ref,
                               int width, int height, int stride, uint8_t *pred,
                               int p_col, int p_row, int p_width, int p_height,
                               int p_stride, int subsampling_x,
                               int subsampling_y, ConvolveParams *conv_params,
                               int16_t alpha, int16_t beta, int16_t gamma,
                               int16_t delta) {
  av1_warp_affine_common(mat, ref, width, height, stride, pred, p_col, p_row,
                         p_width, p_height, p_stride, subsampling_x,
                         subsampling_y, conv_params, alpha, beta, gamma, delta);
}