tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

highbd_warp_plane_sve.c (10768B)


/*
 * Copyright (c) 2024, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>
#include <stdbool.h>
#include <arm_neon_sve_bridge.h>

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/arm/aom_neon_sve_bridge.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/transpose_neon.h"
#include "aom_ports/mem.h"
#include "av1/common/scale.h"
#include "av1/common/warped_motion.h"
#include "config/av1_rtcd.h"
#include "highbd_warp_plane_neon.h"

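// Horizontal pass, four output pixels with four distinct 8-tap filters (the
// alpha != 0 case, where the filter phase steps by alpha per output column).
// aom_sdotq_s16() maps to an SVE SDOT through the Neon/SVE bridge: each
// int64 lane accumulates the dot product of four adjacent int16 pairs, so
// the pairwise vpaddq_s64() adds reduce them to one full 8-tap sum per
// output pixel.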
static AOM_FORCE_INLINE int16x8_t
highbd_horizontal_filter_4x1_f4(int16x8_t rv0, int16x8_t rv1, int16x8_t rv2,
                                int16x8_t rv3, int bd, int sx, int alpha) {
  int16x8_t f[4];
  load_filters_4(f, sx, alpha);

  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), rv0, f[0]);
  int64x2_t m1 = aom_sdotq_s16(vdupq_n_s64(0), rv1, f[1]);
  int64x2_t m2 = aom_sdotq_s16(vdupq_n_s64(0), rv2, f[2]);
  int64x2_t m3 = aom_sdotq_s16(vdupq_n_s64(0), rv3, f[3]);

  int64x2_t m01 = vpaddq_s64(m0, m1);
  int64x2_t m23 = vpaddq_s64(m2, m3);

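  // Add the horizontal offset (1 << (bd + FILTER_BITS - 1)) so intermediates
  // stay non-negative, then apply the first-stage rounding shift; 12-bit
  // input rounds by two extra bits so the intermediate still fits in 16
  // bits.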
  const int round0 = bd == 12 ? ROUND0_BITS + 2 : ROUND0_BITS;
  const int offset_bits_horiz = bd + FILTER_BITS - 1;

  int32x4_t res = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
  res = vaddq_s32(res, vdupq_n_s32(1 << offset_bits_horiz));
  res = vrshlq_s32(res, vdupq_n_s32(-round0));
  return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}

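// As above, but eight output pixels per row, so eight per-column filters.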
static AOM_FORCE_INLINE int16x8_t highbd_horizontal_filter_8x1_f8(
    int16x8_t rv0, int16x8_t rv1, int16x8_t rv2, int16x8_t rv3, int16x8_t rv4,
    int16x8_t rv5, int16x8_t rv6, int16x8_t rv7, int bd, int sx, int alpha) {
  int16x8_t f[8];
  load_filters_8(f, sx, alpha);

  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), rv0, f[0]);
  int64x2_t m1 = aom_sdotq_s16(vdupq_n_s64(0), rv1, f[1]);
  int64x2_t m2 = aom_sdotq_s16(vdupq_n_s64(0), rv2, f[2]);
  int64x2_t m3 = aom_sdotq_s16(vdupq_n_s64(0), rv3, f[3]);
  int64x2_t m4 = aom_sdotq_s16(vdupq_n_s64(0), rv4, f[4]);
  int64x2_t m5 = aom_sdotq_s16(vdupq_n_s64(0), rv5, f[5]);
  int64x2_t m6 = aom_sdotq_s16(vdupq_n_s64(0), rv6, f[6]);
  int64x2_t m7 = aom_sdotq_s16(vdupq_n_s64(0), rv7, f[7]);

  int64x2_t m01 = vpaddq_s64(m0, m1);
  int64x2_t m23 = vpaddq_s64(m2, m3);
  int64x2_t m45 = vpaddq_s64(m4, m5);
  int64x2_t m67 = vpaddq_s64(m6, m7);

  const int round0 = bd == 12 ? ROUND0_BITS + 2 : ROUND0_BITS;
  const int offset_bits_horiz = bd + FILTER_BITS - 1;

  int32x4_t res0 = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
  int32x4_t res1 = vcombine_s32(vmovn_s64(m45), vmovn_s64(m67));
  res0 = vaddq_s32(res0, vdupq_n_s32(1 << offset_bits_horiz));
  res1 = vaddq_s32(res1, vdupq_n_s32(1 << offset_bits_horiz));
  res0 = vrshlq_s32(res0, vdupq_n_s32(-round0));
  res1 = vrshlq_s32(res1, vdupq_n_s32(-round0));
  return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}

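// Horizontal pass, four output pixels sharing a single filter (the
// alpha == 0 fast path: every column uses the same 8-tap filter phase sx).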
static AOM_FORCE_INLINE int16x8_t
highbd_horizontal_filter_4x1_f1(int16x8_t rv0, int16x8_t rv1, int16x8_t rv2,
                                int16x8_t rv3, int bd, int sx) {
  int16x8_t f = load_filters_1(sx);

  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), rv0, f);
  int64x2_t m1 = aom_sdotq_s16(vdupq_n_s64(0), rv1, f);
  int64x2_t m2 = aom_sdotq_s16(vdupq_n_s64(0), rv2, f);
  int64x2_t m3 = aom_sdotq_s16(vdupq_n_s64(0), rv3, f);

  int64x2_t m01 = vpaddq_s64(m0, m1);
  int64x2_t m23 = vpaddq_s64(m2, m3);

  const int round0 = bd == 12 ? ROUND0_BITS + 2 : ROUND0_BITS;
  const int offset_bits_horiz = bd + FILTER_BITS - 1;

  int32x4_t res = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
  res = vaddq_s32(res, vdupq_n_s32(1 << offset_bits_horiz));
  res = vrshlq_s32(res, vdupq_n_s32(-round0));
  return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}

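// Single-filter (alpha == 0) path for eight output pixels per row.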
static AOM_FORCE_INLINE int16x8_t highbd_horizontal_filter_8x1_f1(
    int16x8_t rv0, int16x8_t rv1, int16x8_t rv2, int16x8_t rv3, int16x8_t rv4,
    int16x8_t rv5, int16x8_t rv6, int16x8_t rv7, int bd, int sx) {
  int16x8_t f = load_filters_1(sx);

  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), rv0, f);
  int64x2_t m1 = aom_sdotq_s16(vdupq_n_s64(0), rv1, f);
  int64x2_t m2 = aom_sdotq_s16(vdupq_n_s64(0), rv2, f);
  int64x2_t m3 = aom_sdotq_s16(vdupq_n_s64(0), rv3, f);
  int64x2_t m4 = aom_sdotq_s16(vdupq_n_s64(0), rv4, f);
  int64x2_t m5 = aom_sdotq_s16(vdupq_n_s64(0), rv5, f);
  int64x2_t m6 = aom_sdotq_s16(vdupq_n_s64(0), rv6, f);
  int64x2_t m7 = aom_sdotq_s16(vdupq_n_s64(0), rv7, f);

  int64x2_t m01 = vpaddq_s64(m0, m1);
  int64x2_t m23 = vpaddq_s64(m2, m3);
  int64x2_t m45 = vpaddq_s64(m4, m5);
  int64x2_t m67 = vpaddq_s64(m6, m7);

  const int round0 = bd == 12 ? ROUND0_BITS + 2 : ROUND0_BITS;
  const int offset_bits_horiz = bd + FILTER_BITS - 1;

  int32x4_t res0 = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
  int32x4_t res1 = vcombine_s32(vmovn_s64(m45), vmovn_s64(m67));
  res0 = vaddq_s32(res0, vdupq_n_s32(1 << offset_bits_horiz));
  res1 = vaddq_s32(res1, vdupq_n_s32(1 << offset_bits_horiz));
  res0 = vrshlq_s32(res0, vdupq_n_s32(-round0));
  res1 = vrshlq_s32(res1, vdupq_n_s32(-round0));
  return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}

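// Vertical pass, four columns with one shared 8-tap filter (gamma == 0).
// tmp[] holds eight rows of horizontally filtered intermediates.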
static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
                                                         int sy) {
  const int16x8_t f = load_filters_1(sy);
  const int16x4_t f0123 = vget_low_s16(f);
  const int16x4_t f4567 = vget_high_s16(f);

  // No benefit to using SDOT here, the cost of rearrangement is too high.
  int32x4_t m0123 = vmull_lane_s16(vget_low_s16(tmp[0]), f0123, 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[1]), f0123, 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[2]), f0123, 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[3]), f0123, 3);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[4]), f4567, 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[5]), f4567, 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[6]), f4567, 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[7]), f4567, 3);
  return m0123;
}

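// Single-filter (gamma == 0) vertical pass for eight columns, accumulating
// the low and high halves of each intermediate row separately.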
static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
                                                           int sy) {
  const int16x8_t f = load_filters_1(sy);
  const int16x4_t f0123 = vget_low_s16(f);
  const int16x4_t f4567 = vget_high_s16(f);

  // No benefit to using SDOT here, the cost of rearrangement is too high.
  int32x4_t m0123 = vmull_lane_s16(vget_low_s16(tmp[0]), f0123, 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[1]), f0123, 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[2]), f0123, 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[3]), f0123, 3);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[4]), f4567, 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[5]), f4567, 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[6]), f4567, 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[7]), f4567, 3);

  int32x4_t m4567 = vmull_lane_s16(vget_high_s16(tmp[0]), f0123, 0);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[1]), f0123, 1);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[2]), f0123, 2);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[3]), f0123, 3);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[4]), f4567, 0);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[5]), f4567, 1);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[6]), f4567, 2);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[7]), f4567, 3);
  return (int32x4x2_t){ { m0123, m4567 } };
}

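// Vertical pass, four columns with per-column filters (gamma != 0). The 4x8
// block of intermediates is transposed so that each vector holds the eight
// taps of one output column, turning the pass into dot products like the
// horizontal case.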
static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
                                                         int sy, int gamma) {
  int16x8_t s0, s1, s2, s3;
  transpose_elems_s16_4x8(
      vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]),
      vget_low_s16(tmp[3]), vget_low_s16(tmp[4]), vget_low_s16(tmp[5]),
      vget_low_s16(tmp[6]), vget_low_s16(tmp[7]), &s0, &s1, &s2, &s3);

  int16x8_t f[4];
  load_filters_4(f, sy, gamma);

  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), s0, f[0]);
  int64x2_t m1 = aom_sdotq_s16(vdupq_n_s64(0), s1, f[1]);
  int64x2_t m2 = aom_sdotq_s16(vdupq_n_s64(0), s2, f[2]);
  int64x2_t m3 = aom_sdotq_s16(vdupq_n_s64(0), s3, f[3]);

  int64x2_t m01 = vpaddq_s64(m0, m1);
  int64x2_t m23 = vpaddq_s64(m2, m3);
  return vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
}

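// Per-column-filter (gamma != 0) vertical pass for eight columns, using an
// in-place 8x8 transpose before the dot products.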
static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
                                                           int sy, int gamma) {
  int16x8_t s0 = tmp[0];
  int16x8_t s1 = tmp[1];
  int16x8_t s2 = tmp[2];
  int16x8_t s3 = tmp[3];
  int16x8_t s4 = tmp[4];
  int16x8_t s5 = tmp[5];
  int16x8_t s6 = tmp[6];
  int16x8_t s7 = tmp[7];
  transpose_elems_inplace_s16_8x8(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);

  int16x8_t f[8];
  load_filters_8(f, sy, gamma);

  int64x2_t m0 = aom_sdotq_s16(vdupq_n_s64(0), s0, f[0]);
  int64x2_t m1 = aom_sdotq_s16(vdupq_n_s64(0), s1, f[1]);
  int64x2_t m2 = aom_sdotq_s16(vdupq_n_s64(0), s2, f[2]);
  int64x2_t m3 = aom_sdotq_s16(vdupq_n_s64(0), s3, f[3]);
  int64x2_t m4 = aom_sdotq_s16(vdupq_n_s64(0), s4, f[4]);
  int64x2_t m5 = aom_sdotq_s16(vdupq_n_s64(0), s5, f[5]);
  int64x2_t m6 = aom_sdotq_s16(vdupq_n_s64(0), s6, f[6]);
  int64x2_t m7 = aom_sdotq_s16(vdupq_n_s64(0), s7, f[7]);

  int64x2_t m01 = vpaddq_s64(m0, m1);
  int64x2_t m23 = vpaddq_s64(m2, m3);
  int64x2_t m45 = vpaddq_s64(m4, m5);
  int64x2_t m67 = vpaddq_s64(m6, m7);

  int32x4x2_t ret;
  ret.val[0] = vcombine_s32(vmovn_s64(m01), vmovn_s64(m23));
  ret.val[1] = vcombine_s32(vmovn_s64(m45), vmovn_s64(m67));
  return ret;
}

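// Entry point registered through the av1_rtcd dispatch. The blockwise driver
// loop is shared with the Neon implementation via highbd_warp_plane_neon.h,
// which invokes the SVE-specific filter helpers defined above.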
void av1_highbd_warp_affine_sve(const int32_t *mat, const uint16_t *ref,
                                int width, int height, int stride,
                                uint16_t *pred, int p_col, int p_row,
                                int p_width, int p_height, int p_stride,
                                int subsampling_x, int subsampling_y, int bd,
                                ConvolveParams *conv_params, int16_t alpha,
                                int16_t beta, int16_t gamma, int16_t delta) {
  highbd_warp_affine_common(mat, ref, width, height, stride, pred, p_col, p_row,
                            p_width, p_height, p_stride, subsampling_x,
                            subsampling_y, bd, conv_params, alpha, beta, gamma,
                            delta);
}