
highbd_warp_plane_neon.h


/*
 * Copyright (c) 2023, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#ifndef AOM_AV1_COMMON_ARM_HIGHBD_WARP_PLANE_NEON_H_
#define AOM_AV1_COMMON_ARM_HIGHBD_WARP_PLANE_NEON_H_

#include <arm_neon.h>
#include <assert.h>
#include <stdbool.h>

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"
#include "aom_ports/mem.h"
#include "av1/common/scale.h"
#include "av1/common/warped_motion.h"
#include "config/av1_rtcd.h"

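// The filter kernels below are declared but not defined here; they are
// provided by the source file that includes this header, so the different
// Arm implementations (e.g. Neon, SVE) can share the driver code in this
// file.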
static AOM_FORCE_INLINE int16x8_t
highbd_horizontal_filter_4x1_f4(int16x8_t rv0, int16x8_t rv1, int16x8_t rv2,
                                int16x8_t rv3, int bd, int sx, int alpha);

static AOM_FORCE_INLINE int16x8_t highbd_horizontal_filter_8x1_f8(
    int16x8_t rv0, int16x8_t rv1, int16x8_t rv2, int16x8_t rv3, int16x8_t rv4,
    int16x8_t rv5, int16x8_t rv6, int16x8_t rv7, int bd, int sx, int alpha);

static AOM_FORCE_INLINE int16x8_t highbd_horizontal_filter_4x1_f1(
    int16x8_t rv0, int16x8_t rv1, int16x8_t rv2, int16x8_t rv3, int bd, int sx);

static AOM_FORCE_INLINE int16x8_t highbd_horizontal_filter_8x1_f1(
    int16x8_t rv0, int16x8_t rv1, int16x8_t rv2, int16x8_t rv3, int16x8_t rv4,
    int16x8_t rv5, int16x8_t rv6, int16x8_t rv7, int bd, int sx);

static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
                                                         int sy);

static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
                                                           int sy);

static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
                                                         int sy, int gamma);

static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
                                                           int sy, int gamma);

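// Helpers to load 8-tap warped filters. `base` points at the centre row of
// av1_warped_filter, so the rounded offsets index rows relative to the centre
// of the table.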
static AOM_FORCE_INLINE int16x8_t load_filters_1(int ofs) {
  const int ofs0 = ROUND_POWER_OF_TWO(ofs, WARPEDDIFF_PREC_BITS);

  const int16_t *base = av1_warped_filter[WARPEDPIXEL_PREC_SHIFTS];
  return vld1q_s16(base + ofs0 * 8);
}

static AOM_FORCE_INLINE void load_filters_4(int16x8_t out[], int ofs,
                                            int stride) {
  const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS);
  const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS);
  const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS);
  const int ofs3 = ROUND_POWER_OF_TWO(ofs + stride * 3, WARPEDDIFF_PREC_BITS);

  const int16_t *base = av1_warped_filter[WARPEDPIXEL_PREC_SHIFTS];
  out[0] = vld1q_s16(base + ofs0 * 8);
  out[1] = vld1q_s16(base + ofs1 * 8);
  out[2] = vld1q_s16(base + ofs2 * 8);
  out[3] = vld1q_s16(base + ofs3 * 8);
}

static AOM_FORCE_INLINE void load_filters_8(int16x8_t out[], int ofs,
                                            int stride) {
  const int ofs0 = ROUND_POWER_OF_TWO(ofs + stride * 0, WARPEDDIFF_PREC_BITS);
  const int ofs1 = ROUND_POWER_OF_TWO(ofs + stride * 1, WARPEDDIFF_PREC_BITS);
  const int ofs2 = ROUND_POWER_OF_TWO(ofs + stride * 2, WARPEDDIFF_PREC_BITS);
  const int ofs3 = ROUND_POWER_OF_TWO(ofs + stride * 3, WARPEDDIFF_PREC_BITS);
  const int ofs4 = ROUND_POWER_OF_TWO(ofs + stride * 4, WARPEDDIFF_PREC_BITS);
  const int ofs5 = ROUND_POWER_OF_TWO(ofs + stride * 5, WARPEDDIFF_PREC_BITS);
  const int ofs6 = ROUND_POWER_OF_TWO(ofs + stride * 6, WARPEDDIFF_PREC_BITS);
  const int ofs7 = ROUND_POWER_OF_TWO(ofs + stride * 7, WARPEDDIFF_PREC_BITS);

  const int16_t *base = av1_warped_filter[WARPEDPIXEL_PREC_SHIFTS];
  out[0] = vld1q_s16(base + ofs0 * 8);
  out[1] = vld1q_s16(base + ofs1 * 8);
  out[2] = vld1q_s16(base + ofs2 * 8);
  out[3] = vld1q_s16(base + ofs3 * 8);
  out[4] = vld1q_s16(base + ofs4 * 8);
  out[5] = vld1q_s16(base + ofs5 * 8);
  out[6] = vld1q_s16(base + ofs6 * 8);
  out[7] = vld1q_s16(base + ofs7 * 8);
}

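// Clamp each 32-bit value to the valid pixel range [0, (1 << bd) - 1]; the
// saturating narrow to u16 also clips negative values to zero.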
static AOM_FORCE_INLINE uint16x4_t clip_pixel_highbd_vec(int32x4_t val,
                                                         int bd) {
  const int limit = (1 << bd) - 1;
  return vqmovun_s32(vminq_s32(val, vdupq_n_s32(limit)));
}

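// Replace any of the 16 loaded lanes that fall outside the frame with the
// leftmost/rightmost valid pixel of the row, using the iota index vectors
// indx0/indx1 to select the out-of-bounds lanes.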
static AOM_FORCE_INLINE uint16x8x2_t clamp_horizontal(
    uint16x8x2_t src_1, int out_of_boundary_left, int out_of_boundary_right,
    const uint16_t *ref, int iy, int stride, int width, const uint16x8_t indx0,
    const uint16x8_t indx1) {
  if (out_of_boundary_left >= 0) {
    uint16x8_t cmp_vec = vdupq_n_u16(out_of_boundary_left);
    uint16x8_t vec_dup = vdupq_n_u16(ref[iy * stride]);
    uint16x8_t mask0 = vcleq_u16(indx0, cmp_vec);
    uint16x8_t mask1 = vcleq_u16(indx1, cmp_vec);
    src_1.val[0] = vbslq_u16(mask0, vec_dup, src_1.val[0]);
    src_1.val[1] = vbslq_u16(mask1, vec_dup, src_1.val[1]);
  }
  if (out_of_boundary_right >= 0) {
    uint16x8_t cmp_vec = vdupq_n_u16(15 - out_of_boundary_right);
    uint16x8_t vec_dup = vdupq_n_u16(ref[iy * stride + width - 1]);
    uint16x8_t mask0 = vcgeq_u16(indx0, cmp_vec);
    uint16x8_t mask1 = vcgeq_u16(indx1, cmp_vec);
    src_1.val[0] = vbslq_u16(mask0, vec_dup, src_1.val[0]);
    src_1.val[1] = vbslq_u16(mask1, vec_dup, src_1.val[1]);
  }
  return src_1;
}

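// Horizontal pass: fill tmp[] with the 15 rows of horizontally filtered
// samples that the 8-tap vertical pass needs. Blocks lying entirely to the
// left or right of the frame reduce to a constant, so filtering is skipped.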
static AOM_FORCE_INLINE void warp_affine_horizontal(const uint16_t *ref,
                                                    int width, int height,
                                                    int stride, int p_width,
                                                    int16_t alpha, int16_t beta,
                                                    int iy4, int sx4, int ix4,
                                                    int16x8_t tmp[], int bd) {
  const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS;

  if (ix4 <= -7) {
    for (int k = 0; k < 15; ++k) {
      int iy = clamp(iy4 + k - 7, 0, height - 1);
      int32_t dup_val = (1 << (bd + FILTER_BITS - round0 - 1)) +
                        ref[iy * stride] * (1 << (FILTER_BITS - round0));
      tmp[k] = vdupq_n_s16(dup_val);
    }
    return;
  } else if (ix4 >= width + 6) {
    for (int k = 0; k < 15; ++k) {
      int iy = clamp(iy4 + k - 7, 0, height - 1);
      int32_t dup_val =
          (1 << (bd + FILTER_BITS - round0 - 1)) +
          ref[iy * stride + (width - 1)] * (1 << (FILTER_BITS - round0));
      tmp[k] = vdupq_n_s16(dup_val);
    }
    return;
  }

  static const uint16_t kIotaArr[] = { 0, 1, 2,  3,  4,  5,  6,  7,
                                       8, 9, 10, 11, 12, 13, 14, 15 };
  const uint16x8_t indx0 = vld1q_u16(kIotaArr);
  const uint16x8_t indx1 = vld1q_u16(kIotaArr + 8);

  const int out_of_boundary_left = -(ix4 - 6);
  const int out_of_boundary_right = (ix4 + 8) - width;

#define APPLY_HORIZONTAL_SHIFT_4X1(fn, ...)                                \
  do {                                                                     \
    if (out_of_boundary_left >= 0 || out_of_boundary_right >= 0) {         \
      for (int k = 0; k < 15; ++k) {                                       \
        const int iy = clamp(iy4 + k - 7, 0, height - 1);                  \
        const uint16_t *idx = ref + iy * stride + ix4 - 7;                 \
        /* We don't use vld1q_u16_x2 here as LLVM generates an incorrect   \
         * alignment hint for this intrinsic that causes a SIGBUS on Armv7 \
         * targets when alignment checks are enabled.                      \
         * (See bug: b/349455146) */                                       \
        uint16x8x2_t src_1 = { { vld1q_u16(idx), vld1q_u16(idx + 8) } };   \
        src_1 = clamp_horizontal(src_1, out_of_boundary_left,              \
                                 out_of_boundary_right, ref, iy, stride,   \
                                 width, indx0, indx1);                     \
        int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),     \
                                  vreinterpretq_s16_u16(src_1.val[1]), 0); \
        int16x8_t rv1 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),     \
                                  vreinterpretq_s16_u16(src_1.val[1]), 1); \
        int16x8_t rv2 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),     \
                                  vreinterpretq_s16_u16(src_1.val[1]), 2); \
        int16x8_t rv3 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),     \
                                  vreinterpretq_s16_u16(src_1.val[1]), 3); \
        tmp[k] = (fn)(rv0, rv1, rv2, rv3, __VA_ARGS__);                    \
      }                                                                    \
    } else {                                                               \
      for (int k = 0; k < 15; ++k) {                                       \
        const int iy = clamp(iy4 + k - 7, 0, height - 1);                  \
        const uint16_t *src = ref + iy * stride + ix4;                     \
        int16x8_t rv0 = vreinterpretq_s16_u16(vld1q_u16(src - 7));         \
        int16x8_t rv1 = vreinterpretq_s16_u16(vld1q_u16(src - 6));         \
        int16x8_t rv2 = vreinterpretq_s16_u16(vld1q_u16(src - 5));         \
        int16x8_t rv3 = vreinterpretq_s16_u16(vld1q_u16(src - 4));         \
        tmp[k] = (fn)(rv0, rv1, rv2, rv3, __VA_ARGS__);                    \
      }                                                                    \
    }                                                                      \
  } while (0)

#define APPLY_HORIZONTAL_SHIFT_8X1(fn, ...)                                 \
  do {                                                                      \
    if (out_of_boundary_left >= 0 || out_of_boundary_right >= 0) {          \
      for (int k = 0; k < 15; ++k) {                                        \
        const int iy = clamp(iy4 + k - 7, 0, height - 1);                   \
        const uint16_t *idx = ref + iy * stride + ix4 - 7;                  \
        /* We don't use vld1q_u16_x2 here as LLVM generates an incorrect    \
         * alignment hint for this intrinsic that causes a SIGBUS on Armv7  \
         * targets when alignment checks are enabled.                       \
         * (See bug: b/349455146) */                                        \
        uint16x8x2_t src_1 = { { vld1q_u16(idx), vld1q_u16(idx + 8) } };    \
        src_1 = clamp_horizontal(src_1, out_of_boundary_left,               \
                                 out_of_boundary_right, ref, iy, stride,    \
                                 width, indx0, indx1);                      \
        int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),      \
                                  vreinterpretq_s16_u16(src_1.val[1]), 0);  \
        int16x8_t rv1 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),      \
                                  vreinterpretq_s16_u16(src_1.val[1]), 1);  \
        int16x8_t rv2 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),      \
                                  vreinterpretq_s16_u16(src_1.val[1]), 2);  \
        int16x8_t rv3 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),      \
                                  vreinterpretq_s16_u16(src_1.val[1]), 3);  \
        int16x8_t rv4 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),      \
                                  vreinterpretq_s16_u16(src_1.val[1]), 4);  \
        int16x8_t rv5 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),      \
                                  vreinterpretq_s16_u16(src_1.val[1]), 5);  \
        int16x8_t rv6 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),      \
                                  vreinterpretq_s16_u16(src_1.val[1]), 6);  \
        int16x8_t rv7 = vextq_s16(vreinterpretq_s16_u16(src_1.val[0]),      \
                                  vreinterpretq_s16_u16(src_1.val[1]), 7);  \
        tmp[k] = (fn)(rv0, rv1, rv2, rv3, rv4, rv5, rv6, rv7, __VA_ARGS__); \
      }                                                                     \
    } else {                                                                \
      for (int k = 0; k < 15; ++k) {                                        \
        const int iy = clamp(iy4 + k - 7, 0, height - 1);                   \
        const uint16_t *src = ref + iy * stride + ix4;                      \
        int16x8_t rv0 = vreinterpretq_s16_u16(vld1q_u16(src - 7));          \
        int16x8_t rv1 = vreinterpretq_s16_u16(vld1q_u16(src - 6));          \
        int16x8_t rv2 = vreinterpretq_s16_u16(vld1q_u16(src - 5));          \
        int16x8_t rv3 = vreinterpretq_s16_u16(vld1q_u16(src - 4));          \
        int16x8_t rv4 = vreinterpretq_s16_u16(vld1q_u16(src - 3));          \
        int16x8_t rv5 = vreinterpretq_s16_u16(vld1q_u16(src - 2));          \
        int16x8_t rv6 = vreinterpretq_s16_u16(vld1q_u16(src - 1));          \
        int16x8_t rv7 = vreinterpretq_s16_u16(vld1q_u16(src - 0));          \
        tmp[k] = (fn)(rv0, rv1, rv2, rv3, rv4, rv5, rv6, rv7, __VA_ARGS__); \
      }                                                                     \
    }                                                                       \
  } while (0)

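  // Note: the sx argument passed to the macros below is substituted into the
  // macro loop body, so the (k - 3) term advances the filter phase by beta
  // for each of the 15 filtered rows.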
  if (p_width == 4) {
    if (beta == 0) {
      if (alpha == 0) {
        APPLY_HORIZONTAL_SHIFT_4X1(highbd_horizontal_filter_4x1_f1, bd, sx4);
      } else {
        APPLY_HORIZONTAL_SHIFT_4X1(highbd_horizontal_filter_4x1_f4, bd, sx4,
                                   alpha);
      }
    } else {
      if (alpha == 0) {
        APPLY_HORIZONTAL_SHIFT_4X1(highbd_horizontal_filter_4x1_f1, bd,
                                   (sx4 + beta * (k - 3)));
      } else {
        APPLY_HORIZONTAL_SHIFT_4X1(highbd_horizontal_filter_4x1_f4, bd,
                                   (sx4 + beta * (k - 3)), alpha);
      }
    }
  } else {
    if (beta == 0) {
      if (alpha == 0) {
        APPLY_HORIZONTAL_SHIFT_8X1(highbd_horizontal_filter_8x1_f1, bd, sx4);
      } else {
        APPLY_HORIZONTAL_SHIFT_8X1(highbd_horizontal_filter_8x1_f8, bd, sx4,
                                   alpha);
      }
    } else {
      if (alpha == 0) {
        APPLY_HORIZONTAL_SHIFT_8X1(highbd_horizontal_filter_8x1_f1, bd,
                                   (sx4 + beta * (k - 3)));
      } else {
        APPLY_HORIZONTAL_SHIFT_8X1(highbd_horizontal_filter_8x1_f8, bd,
                                   (sx4 + beta * (k - 3)), alpha);
      }
    }
  }

#undef APPLY_HORIZONTAL_SHIFT_4X1
#undef APPLY_HORIZONTAL_SHIFT_8X1
}

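// Vertical pass for a 4-pixel-wide column: apply the 8-tap vertical filter to
// tmp[], then either store clipped pixels directly or follow the compound
// path with optional (distance-weighted) averaging against the contents of
// dst.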
static AOM_FORCE_INLINE void highbd_vertical_filter_4x1_f4(
    uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride,
    bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd,
    int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) {
  int32x4_t sum0 = gamma == 0 ? vertical_filter_4x1_f1(tmp, sy)
                              : vertical_filter_4x1_f4(tmp, sy, gamma);

  const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS;
  const int offset_bits_vert = bd + 2 * FILTER_BITS - round0;

  sum0 = vaddq_s32(sum0, vdupq_n_s32(1 << offset_bits_vert));

  uint16_t *dst16 = &pred[i * p_stride + j];

  if (!is_compound) {
    const int reduce_bits_vert = 2 * FILTER_BITS - round0;
    sum0 = vrshlq_s32(sum0, vdupq_n_s32(-reduce_bits_vert));

    const int res_sub_const = (1 << (bd - 1)) + (1 << bd);
    sum0 = vsubq_s32(sum0, vdupq_n_s32(res_sub_const));
    uint16x4_t res0 = clip_pixel_highbd_vec(sum0, bd);
    vst1_u16(dst16, res0);
    return;
  }

  sum0 = vrshrq_n_s32(sum0, COMPOUND_ROUND1_BITS);

  uint16_t *p = &dst[i * dst_stride + j];

  if (!do_average) {
    vst1_u16(p, vqmovun_s32(sum0));
    return;
  }

  uint16x4_t p0 = vld1_u16(p);
  int32x4_t p_vec0 = vreinterpretq_s32_u32(vmovl_u16(p0));
  if (use_dist_wtd_comp_avg) {
    p_vec0 = vmulq_n_s32(p_vec0, fwd);
    p_vec0 = vmlaq_n_s32(p_vec0, sum0, bwd);
    p_vec0 = vshrq_n_s32(p_vec0, DIST_PRECISION_BITS);
  } else {
    p_vec0 = vhaddq_s32(p_vec0, sum0);
  }

  const int offset_bits = bd + 2 * FILTER_BITS - round0;
  const int round1 = COMPOUND_ROUND1_BITS;
  const int res_sub_const =
      (1 << (offset_bits - round1)) + (1 << (offset_bits - round1 - 1));
  const int round_bits = 2 * FILTER_BITS - round0 - round1;

  p_vec0 = vsubq_s32(p_vec0, vdupq_n_s32(res_sub_const));
  p_vec0 = vrshlq_s32(p_vec0, vdupq_n_s32(-round_bits));
  uint16x4_t res0 = clip_pixel_highbd_vec(p_vec0, bd);
  vst1_u16(dst16, res0);
}

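// 8-pixel-wide variant of the vertical filter above; the result is processed
// as two 4-lane halves.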
static AOM_FORCE_INLINE void highbd_vertical_filter_8x1_f8(
    uint16_t *pred, int p_stride, int bd, uint16_t *dst, int dst_stride,
    bool is_compound, bool do_average, bool use_dist_wtd_comp_avg, int fwd,
    int bwd, int16_t gamma, const int16x8_t *tmp, int i, int sy, int j) {
  int32x4x2_t sums = gamma == 0 ? vertical_filter_8x1_f1(tmp, sy)
                                : vertical_filter_8x1_f8(tmp, sy, gamma);
  int32x4_t sum0 = sums.val[0];
  int32x4_t sum1 = sums.val[1];

  const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS;
  const int offset_bits_vert = bd + 2 * FILTER_BITS - round0;

  sum0 = vaddq_s32(sum0, vdupq_n_s32(1 << offset_bits_vert));
  sum1 = vaddq_s32(sum1, vdupq_n_s32(1 << offset_bits_vert));

  uint16_t *dst16 = &pred[i * p_stride + j];

  if (!is_compound) {
    const int reduce_bits_vert = 2 * FILTER_BITS - round0;
    sum0 = vrshlq_s32(sum0, vdupq_n_s32(-reduce_bits_vert));
    sum1 = vrshlq_s32(sum1, vdupq_n_s32(-reduce_bits_vert));

    const int res_sub_const = (1 << (bd - 1)) + (1 << bd);
    sum0 = vsubq_s32(sum0, vdupq_n_s32(res_sub_const));
    sum1 = vsubq_s32(sum1, vdupq_n_s32(res_sub_const));
    uint16x4_t res0 = clip_pixel_highbd_vec(sum0, bd);
    uint16x4_t res1 = clip_pixel_highbd_vec(sum1, bd);
    vst1_u16(dst16, res0);
    vst1_u16(dst16 + 4, res1);
    return;
  }

  sum0 = vrshrq_n_s32(sum0, COMPOUND_ROUND1_BITS);
  sum1 = vrshrq_n_s32(sum1, COMPOUND_ROUND1_BITS);

  uint16_t *p = &dst[i * dst_stride + j];

  if (!do_average) {
    vst1_u16(p, vqmovun_s32(sum0));
    vst1_u16(p + 4, vqmovun_s32(sum1));
    return;
  }

  uint16x8_t p0 = vld1q_u16(p);
  int32x4_t p_vec0 = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(p0)));
  int32x4_t p_vec1 = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(p0)));
  if (use_dist_wtd_comp_avg) {
    p_vec0 = vmulq_n_s32(p_vec0, fwd);
    p_vec1 = vmulq_n_s32(p_vec1, fwd);
    p_vec0 = vmlaq_n_s32(p_vec0, sum0, bwd);
    p_vec1 = vmlaq_n_s32(p_vec1, sum1, bwd);
    p_vec0 = vshrq_n_s32(p_vec0, DIST_PRECISION_BITS);
    p_vec1 = vshrq_n_s32(p_vec1, DIST_PRECISION_BITS);
  } else {
    p_vec0 = vhaddq_s32(p_vec0, sum0);
    p_vec1 = vhaddq_s32(p_vec1, sum1);
  }

  const int offset_bits = bd + 2 * FILTER_BITS - round0;
  const int round1 = COMPOUND_ROUND1_BITS;
  const int res_sub_const =
      (1 << (offset_bits - round1)) + (1 << (offset_bits - round1 - 1));
  const int round_bits = 2 * FILTER_BITS - round0 - round1;

  p_vec0 = vsubq_s32(p_vec0, vdupq_n_s32(res_sub_const));
  p_vec1 = vsubq_s32(p_vec1, vdupq_n_s32(res_sub_const));

  p_vec0 = vrshlq_s32(p_vec0, vdupq_n_s32(-round_bits));
  p_vec1 = vrshlq_s32(p_vec1, vdupq_n_s32(-round_bits));
  uint16x4_t res0 = clip_pixel_highbd_vec(p_vec0, bd);
  uint16x4_t res1 = clip_pixel_highbd_vec(p_vec1, bd);
  vst1_u16(dst16, res0);
  vst1_u16(dst16 + 4, res1);
}

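// Run the vertical pass over one output block (up to 8x8), advancing the
// vertical filter phase by delta for each row.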
static AOM_FORCE_INLINE void warp_affine_vertical(
    uint16_t *pred, int p_width, int p_height, int p_stride, int bd,
    uint16_t *dst, int dst_stride, bool is_compound, bool do_average,
    bool use_dist_wtd_comp_avg, int fwd, int bwd, int16_t gamma, int16_t delta,
    const int16x8_t *tmp, int i, int sy4, int j) {
  int limit_height = p_height > 4 ? 8 : 4;

  if (p_width > 4) {
    // p_width == 8
    for (int k = 0; k < limit_height; ++k) {
      int sy = sy4 + delta * k;
      highbd_vertical_filter_8x1_f8(
          pred, p_stride, bd, dst, dst_stride, is_compound, do_average,
          use_dist_wtd_comp_avg, fwd, bwd, gamma, tmp + k, i + k, sy, j);
    }
  } else {
    // p_width == 4
    for (int k = 0; k < limit_height; ++k) {
      int sy = sy4 + delta * k;
      highbd_vertical_filter_4x1_f4(
          pred, p_stride, bd, dst, dst_stride, is_compound, do_average,
          use_dist_wtd_comp_avg, fwd, bwd, gamma, tmp + k, i + k, sy, j);
    }
  }
}

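// Shared driver for high bit-depth warped motion: for each 8x8 output block,
// project the block centre through the affine model `mat`, split the mapped
// position into integer and fractional parts, then run the horizontal and
// vertical filter passes.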
static AOM_FORCE_INLINE void highbd_warp_affine_common(
    const int32_t *mat, const uint16_t *ref, int width, int height, int stride,
    uint16_t *pred, int p_col, int p_row, int p_width, int p_height,
    int p_stride, int subsampling_x, int subsampling_y, int bd,
    ConvolveParams *conv_params, int16_t alpha, int16_t beta, int16_t gamma,
    int16_t delta) {
  uint16_t *const dst = conv_params->dst;
  const int dst_stride = conv_params->dst_stride;
  const bool is_compound = conv_params->is_compound;
  const bool do_average = conv_params->do_average;
  const bool use_dist_wtd_comp_avg = conv_params->use_dist_wtd_comp_avg;
  const int fwd = conv_params->fwd_offset;
  const int bwd = conv_params->bck_offset;

  assert(IMPLIES(is_compound, dst != NULL));

  for (int i = 0; i < p_height; i += 8) {
    for (int j = 0; j < p_width; j += 8) {
      // Calculate the center of this 8x8 block,
      // project to luma coordinates (if in a subsampled chroma plane),
      // apply the affine transformation,
      // then convert back to the original coordinates (if necessary)
      const int32_t src_x = (j + 4 + p_col) << subsampling_x;
      const int32_t src_y = (i + 4 + p_row) << subsampling_y;
      const int64_t dst_x =
          (int64_t)mat[2] * src_x + (int64_t)mat[3] * src_y + (int64_t)mat[0];
      const int64_t dst_y =
          (int64_t)mat[4] * src_x + (int64_t)mat[5] * src_y + (int64_t)mat[1];
      const int64_t x4 = dst_x >> subsampling_x;
      const int64_t y4 = dst_y >> subsampling_y;

      const int32_t ix4 = (int32_t)(x4 >> WARPEDMODEL_PREC_BITS);
      int32_t sx4 = x4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);
      const int32_t iy4 = (int32_t)(y4 >> WARPEDMODEL_PREC_BITS);
      int32_t sy4 = y4 & ((1 << WARPEDMODEL_PREC_BITS) - 1);

      sx4 += alpha * (-4) + beta * (-4);
      sy4 += gamma * (-4) + delta * (-4);

      sx4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1);
      sy4 &= ~((1 << WARP_PARAM_REDUCE_BITS) - 1);

      // Each horizontal filter result is formed by the sum of up to eight
      // multiplications by filter values and then a shift. Although both the
      // inputs and filters are loaded as int16, the input data is at most bd
      // bits and the filters are at most 8 bits each. Additionally since we
      // know all possible filter values we know that the sum of absolute
      // filter values will fit in at most 9 bits. With this in mind we can
      // conclude that the sum of each filter application will fit in bd + 9
      // bits. The shift following the summation is ROUND0_BITS (which is 3),
      // +2 for 12-bit, which gives us a final storage of:
      // bd ==  8: ( 8 + 9) - 3 => 14 bits
      // bd == 10: (10 + 9) - 3 => 16 bits
      // bd == 12: (12 + 9) - 5 => 16 bits
      // So it is safe to use int16x8_t as the intermediate storage type here.
      int16x8_t tmp[15];

      warp_affine_horizontal(ref, width, height, stride, p_width, alpha, beta,
                             iy4, sx4, ix4, tmp, bd);
      warp_affine_vertical(pred, p_width, p_height, p_stride, bd, dst,
                           dst_stride, is_compound, do_average,
                           use_dist_wtd_comp_avg, fwd, bwd, gamma, delta, tmp,
                           i, sy4, j);
    }
  }
}

#endif  // AOM_AV1_COMMON_ARM_HIGHBD_WARP_PLANE_NEON_H_