tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

disflow_sse4.c (17224B)


/*
 * Copyright (c) 2024, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <math.h>
#include <smmintrin.h>

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/flow_estimation/disflow.h"
#include "aom_dsp/x86/synonyms.h"

#include "config/aom_dsp_rtcd.h"

#if DISFLOW_PATCH_SIZE != 8
#error "Need to change disflow_sse4.c if DISFLOW_PATCH_SIZE != 8"
#endif

// Compute horizontal and vertical kernels and return them packed into a
// register. The coefficient ordering is:
//   h0, h1, v0, v1, h2, h3, v2, v3
// This is chosen because it takes less work than fully separating the kernels,
// but it is separated enough that we can pick out each coefficient pair in the
// main compute_flow_at_point function
static inline __m128i compute_cubic_kernels(double u, double v) {
  const __m128d x = _mm_set_pd(v, u);

  const __m128d x2 = _mm_mul_pd(x, x);
  const __m128d x3 = _mm_mul_pd(x2, x);

  // Macro to multiply a value v by a constant coefficient c
#define MULC(c, v) _mm_mul_pd(_mm_set1_pd(c), v)

  // Compute floating-point kernel
  // Note: To ensure results are bit-identical to the C code, we need to
  // perform exactly the same sequence of operations here as in the C code.
  __m128d k0 = _mm_sub_pd(_mm_add_pd(MULC(-0.5, x), x2), MULC(0.5, x3));
  __m128d k1 =
      _mm_add_pd(_mm_sub_pd(_mm_set1_pd(1.0), MULC(2.5, x2)), MULC(1.5, x3));
  __m128d k2 =
      _mm_sub_pd(_mm_add_pd(MULC(0.5, x), MULC(2.0, x2)), MULC(1.5, x3));
  __m128d k3 = _mm_add_pd(MULC(-0.5, x2), MULC(0.5, x3));
#undef MULC

  // Integerize
  __m128d prec = _mm_set1_pd((double)(1 << DISFLOW_INTERP_BITS));

  k0 = _mm_round_pd(_mm_mul_pd(k0, prec),
                    _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  k1 = _mm_round_pd(_mm_mul_pd(k1, prec),
                    _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  k2 = _mm_round_pd(_mm_mul_pd(k2, prec),
                    _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  k3 = _mm_round_pd(_mm_mul_pd(k3, prec),
                    _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);

  const __m128i c0 = _mm_cvtpd_epi32(k0);
  const __m128i c1 = _mm_cvtpd_epi32(k1);
  const __m128i c2 = _mm_cvtpd_epi32(k2);
  const __m128i c3 = _mm_cvtpd_epi32(k3);

  // Rearrange results and convert down to 16 bits, giving the target output
  // ordering
  const __m128i c01 = _mm_unpacklo_epi32(c0, c1);
  const __m128i c23 = _mm_unpacklo_epi32(c2, c3);
  return _mm_packs_epi32(c01, c23);
}
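
// For reference, the kernels computed above are the Catmull-Rom cubic
// interpolation weights. In scalar form, for a fractional offset x in [0, 1):
//   k0(x) = -0.5*x +     x^2 - 0.5*x^3
//   k1(x) =  1.0 - 2.5*x^2 + 1.5*x^3
//   k2(x) =  0.5*x + 2.0*x^2 - 1.5*x^3
//   k3(x) =      -0.5*x^2 + 0.5*x^3
// These weights always sum to exactly 1. For example, at x = 0.5 the weights
// are {-1/16, 9/16, 9/16, -1/16}, which scale to {-1024, 9216, 9216, -1024}
// assuming DISFLOW_INTERP_BITS == 14 (consistent with the +18360 bound
// derived in compute_flow_vector() below).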

// Compare two regions of width x height pixels, one rooted at position
// (x, y) in src and the other at (x + u, y + v) in ref.
// This function computes the flow vector b for the patch: b[0] accumulates
// dx * dt and b[1] accumulates dy * dt, where dt is the per-pixel difference
// between the warped ref patch and the src patch.
//
// TODO(rachelbarker): Test speed/quality impact of using bilinear
// interpolation instead of bicubic interpolation
static inline void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
                                       int width, int height, int stride, int x,
                                       int y, double u, double v,
                                       const int16_t *dx, const int16_t *dy,
                                       int *b) {
  // This function is written to do 8x8 convolutions only
  assert(DISFLOW_PATCH_SIZE == 8);

  // Accumulate 4 32-bit partial sums for each element of b
  // These will be flattened at the end.
  __m128i b0_acc = _mm_setzero_si128();
  __m128i b1_acc = _mm_setzero_si128();

  // Split offset into integer and fractional parts, and compute cubic
  // interpolation kernels
  const int u_int = (int)floor(u);
  const int v_int = (int)floor(v);
  const double u_frac = u - floor(u);
  const double v_frac = v - floor(v);

  const __m128i kernels = compute_cubic_kernels(u_frac, v_frac);

  // Storage for intermediate values between the two convolution directions
  DECLARE_ALIGNED(16, int16_t,
                  tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 3)]);
  int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE;  // Offset by one row

  // Clamp coordinates so that all pixels we fetch will remain within the
  // allocated border region, but allow them to go far enough out that
  // the border pixels' values do not change.
  // Since we are calculating an 8x8 block, the bottom-right pixel
  // in the block has coordinates (x0 + 7, y0 + 7). Then, the cubic
  // interpolation has 4 taps, meaning that the output of pixel
  // (x_w, y_w) depends on the pixels in the range
  // ([x_w - 1, x_w + 2], [y_w - 1, y_w + 2]).
  //
  // Thus the most extreme coordinates which will be fetched are
  // (x0 - 1, y0 - 1) and (x0 + 9, y0 + 9).
  const int x0 = clamp(x + u_int, -9, width);
  const int y0 = clamp(y + v_int, -9, height);

  // Horizontal convolution

  // Prepare the kernel vectors
  // We split the kernel into two vectors with kernel indices:
  // 0, 1, 0, 1, 0, 1, 0, 1, and
  // 2, 3, 2, 3, 2, 3, 2, 3
  __m128i h_kernel_01 = _mm_set1_epi32(_mm_extract_epi32(kernels, 0));
  __m128i h_kernel_23 = _mm_set1_epi32(_mm_extract_epi32(kernels, 2));
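  // Note: Each 32-bit lane of `kernels` holds one adjacent pair of 16-bit
  // coefficients, so broadcasting lane 0 (resp. lane 2) with _mm_set1_epi32
  // replicates the (h0, h1) (resp. (h2, h3)) pair across the whole register,
  // in exactly the layout that _mm_madd_epi16 needs for its pairwise
  // multiply-accumulate below.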

  __m128i round_const_h = _mm_set1_epi32(1 << (DISFLOW_INTERP_BITS - 6 - 1));

  for (int i = -1; i < DISFLOW_PATCH_SIZE + 2; ++i) {
    const int y_w = y0 + i;
    const uint8_t *ref_row = &ref[y_w * stride + (x0 - 1)];
    int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];

    // Load this row of pixels.
    // For an 8x8 patch, we need to load the 8 image pixels + 3 extras,
    // for a total of 11 pixels. Here we load 16 pixels, but only use
    // the first 11.
    __m128i row = _mm_loadu_si128((__m128i *)ref_row);

    // Expand pixels to int16s
    __m128i px_0to7_i16 = _mm_cvtepu8_epi16(row);
    __m128i px_4to10_i16 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 4));

    // Compute first four outputs
    // input pixels 0, 1, 1, 2, 2, 3, 3, 4
    // * kernel     0, 1, 0, 1, 0, 1, 0, 1
    __m128i px0 =
        _mm_unpacklo_epi16(px_0to7_i16, _mm_srli_si128(px_0to7_i16, 2));
    // input pixels 2, 3, 3, 4, 4, 5, 5, 6
    // * kernel     2, 3, 2, 3, 2, 3, 2, 3
    __m128i px1 = _mm_unpacklo_epi16(_mm_srli_si128(px_0to7_i16, 4),
                                     _mm_srli_si128(px_0to7_i16, 6));
    // Convolve with kernel and sum 2x2 boxes to form first 4 outputs
    __m128i sum0 = _mm_add_epi32(_mm_madd_epi16(px0, h_kernel_01),
                                 _mm_madd_epi16(px1, h_kernel_23));

    __m128i out0 = _mm_srai_epi32(_mm_add_epi32(sum0, round_const_h),
                                  DISFLOW_INTERP_BITS - 6);

    // Compute second four outputs
    __m128i px2 =
        _mm_unpacklo_epi16(px_4to10_i16, _mm_srli_si128(px_4to10_i16, 2));
    __m128i px3 = _mm_unpacklo_epi16(_mm_srli_si128(px_4to10_i16, 4),
                                     _mm_srli_si128(px_4to10_i16, 6));
    __m128i sum1 = _mm_add_epi32(_mm_madd_epi16(px2, h_kernel_01),
                                 _mm_madd_epi16(px3, h_kernel_23));

    // Round by just enough bits that the result is guaranteed to fit into
    // an i16, so that the next stage can use 16 x 16 -> 32 bit multiplies,
    // which should be a fair bit faster than 32 x 32 -> 32 bit multiplies.
    // This means shifting down so we have 6 extra bits, for a maximum value
    // of +18360, which can occur if u_frac == 0.5 and the input pixels are
    // {0, 255, 255, 0}.
    __m128i out1 = _mm_srai_epi32(_mm_add_epi32(sum1, round_const_h),
                                  DISFLOW_INTERP_BITS - 6);

    _mm_storeu_si128((__m128i *)tmp_row, _mm_packs_epi32(out0, out1));
  }

  // Vertical convolution
  const int round_bits = DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2;
  __m128i round_const_v = _mm_set1_epi32(1 << (round_bits - 1));

  __m128i v_kernel_01 = _mm_set1_epi32(_mm_extract_epi32(kernels, 1));
  __m128i v_kernel_23 = _mm_set1_epi32(_mm_extract_epi32(kernels, 3));

  for (int i = 0; i < DISFLOW_PATCH_SIZE; ++i) {
    int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];

    // Load 4 rows of 8 x 16-bit values
    __m128i px0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
    __m128i px1 = _mm_loadu_si128((__m128i *)tmp_row);
    __m128i px2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
    __m128i px3 =
        _mm_loadu_si128((__m128i *)(tmp_row + 2 * DISFLOW_PATCH_SIZE));

    // We want to calculate px0 * v_kernel[0] + px1 * v_kernel[1] + ... ,
    // but each multiply expands its output to 32 bits. So we need to be
    // a little clever about how we do this
    __m128i sum0 = _mm_add_epi32(
        _mm_madd_epi16(_mm_unpacklo_epi16(px0, px1), v_kernel_01),
        _mm_madd_epi16(_mm_unpacklo_epi16(px2, px3), v_kernel_23));
    __m128i sum1 = _mm_add_epi32(
        _mm_madd_epi16(_mm_unpackhi_epi16(px0, px1), v_kernel_01),
        _mm_madd_epi16(_mm_unpackhi_epi16(px2, px3), v_kernel_23));

    __m128i sum0_rounded =
        _mm_srai_epi32(_mm_add_epi32(sum0, round_const_v), round_bits);
    __m128i sum1_rounded =
        _mm_srai_epi32(_mm_add_epi32(sum1, round_const_v), round_bits);

    __m128i warped = _mm_packs_epi32(sum0_rounded, sum1_rounded);
    __m128i src_pixels_u8 =
        _mm_loadl_epi64((__m128i *)&src[(y + i) * stride + x]);
    __m128i src_pixels = _mm_slli_epi16(_mm_cvtepu8_epi16(src_pixels_u8), 3);
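    // The << 3 above matches the scale of `warped`: the vertical rounding
    // shifts out round_bits = DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2
    // bits, leaving the interpolated values scaled by
    // 2^DISFLOW_DERIV_SCALE_LOG2 (assumed to be 3 here, hence the shift),
    // so the subtraction below happens at a common fixed-point scale.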

    // Calculate delta from the target patch
    __m128i dt = _mm_sub_epi16(warped, src_pixels);

    // Load 8 elements each of dx and dy, to pair with the 8 elements of dt
    // that we have just computed. Then compute 8 partial sums of dx * dt
    // and dy * dt, implicitly sum to give 4 partial sums of each, and
    // accumulate.
    __m128i dx_row = _mm_loadu_si128((__m128i *)&dx[i * DISFLOW_PATCH_SIZE]);
    __m128i dy_row = _mm_loadu_si128((__m128i *)&dy[i * DISFLOW_PATCH_SIZE]);
    b0_acc = _mm_add_epi32(b0_acc, _mm_madd_epi16(dx_row, dt));
    b1_acc = _mm_add_epi32(b1_acc, _mm_madd_epi16(dy_row, dt));
  }

  // Flatten the two sets of partial sums to find the final value of b
  // We need to set b[0] = sum(b0_acc), b[1] = sum(b1_acc).
  // We need to do 6 additions in total; a `hadd` instruction can take care
  // of four of them, leaving two scalar additions.
  __m128i partial_sum = _mm_hadd_epi32(b0_acc, b1_acc);
  b[0] = _mm_extract_epi32(partial_sum, 0) + _mm_extract_epi32(partial_sum, 1);
  b[1] = _mm_extract_epi32(partial_sum, 2) + _mm_extract_epi32(partial_sum, 3);
}
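
// For reference, a scalar sketch of what compute_flow_vector() computes,
// ignoring the fixed-point scaling details above. Here warped_ref(px, py) is
// illustrative shorthand for the bicubic interpolation of `ref` at a
// fractional position, not a real function in this file:
//
//   for (int i = 0; i < 8; ++i) {
//     for (int j = 0; j < 8; ++j) {
//       int dt = warped_ref(x + j + u, y + i + v) -
//                src[(y + i) * stride + (x + j)];
//       b[0] += dx[i * 8 + j] * dt;
//       b[1] += dy[i * 8 + j] * dt;
//     }
//   }
//
// b is then used as the right-hand side of the 2x2 linear system solved in
// aom_compute_flow_at_point_sse4_1().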

// Compute the x and y gradients of the source patch in a single pass,
// and store into dx and dy respectively.
static inline void sobel_filter(const uint8_t *src, int src_stride, int16_t *dx,
                                int16_t *dy) {
  // Loop setup: Load the first two rows (of 10 input rows) and apply
  // the horizontal parts of the two filters
  __m128i row_m1 = _mm_loadu_si128((__m128i *)(src - src_stride - 1));
  __m128i row_m1_a = _mm_cvtepu8_epi16(row_m1);
  __m128i row_m1_b = _mm_cvtepu8_epi16(_mm_srli_si128(row_m1, 1));
  __m128i row_m1_c = _mm_cvtepu8_epi16(_mm_srli_si128(row_m1, 2));

  __m128i row_m1_hsmooth = _mm_add_epi16(_mm_add_epi16(row_m1_a, row_m1_c),
                                         _mm_slli_epi16(row_m1_b, 1));
  __m128i row_m1_hdiff = _mm_sub_epi16(row_m1_a, row_m1_c);

  __m128i row = _mm_loadu_si128((__m128i *)(src - 1));
  __m128i row_a = _mm_cvtepu8_epi16(row);
  __m128i row_b = _mm_cvtepu8_epi16(_mm_srli_si128(row, 1));
  __m128i row_c = _mm_cvtepu8_epi16(_mm_srli_si128(row, 2));

  __m128i row_hsmooth =
      _mm_add_epi16(_mm_add_epi16(row_a, row_c), _mm_slli_epi16(row_b, 1));
  __m128i row_hdiff = _mm_sub_epi16(row_a, row_c);

  // Main loop: For each of the 8 output rows:
  // * Load row i+1 and apply both horizontal filters
  // * Apply vertical filters and store results
  // * Shift rows for next iteration
  for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
    // Load row i+1 and apply both horizontal filters
    const __m128i row_p1 =
        _mm_loadu_si128((__m128i *)(src + (i + 1) * src_stride - 1));
    const __m128i row_p1_a = _mm_cvtepu8_epi16(row_p1);
    const __m128i row_p1_b = _mm_cvtepu8_epi16(_mm_srli_si128(row_p1, 1));
    const __m128i row_p1_c = _mm_cvtepu8_epi16(_mm_srli_si128(row_p1, 2));

    const __m128i row_p1_hsmooth = _mm_add_epi16(
        _mm_add_epi16(row_p1_a, row_p1_c), _mm_slli_epi16(row_p1_b, 1));
    const __m128i row_p1_hdiff = _mm_sub_epi16(row_p1_a, row_p1_c);

    // Apply vertical filters and store results
    // dx = vertical smooth(horizontal diff(input))
    // dy = vertical diff(horizontal smooth(input))
    const __m128i dx_row =
        _mm_add_epi16(_mm_add_epi16(row_m1_hdiff, row_p1_hdiff),
                      _mm_slli_epi16(row_hdiff, 1));
    const __m128i dy_row = _mm_sub_epi16(row_m1_hsmooth, row_p1_hsmooth);

    _mm_storeu_si128((__m128i *)(dx + i * DISFLOW_PATCH_SIZE), dx_row);
    _mm_storeu_si128((__m128i *)(dy + i * DISFLOW_PATCH_SIZE), dy_row);

    // Shift rows for next iteration
    // This allows a lot of work to be reused, reducing the number of
    // horizontal filtering operations from 2*3*8 = 48 to 2*10 = 20
    row_m1_hsmooth = row_hsmooth;
    row_m1_hdiff = row_hdiff;
    row_hsmooth = row_p1_hsmooth;
    row_hdiff = row_p1_hdiff;
  }
}
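
// For reference, the two 3x3 Sobel-type kernels applied above, written as
// outer products of their separable parts (signs follow the a - c and
// top-minus-bottom conventions used in the code):
//
//        [ 1 ]                      [  1 ]
//   dx = [ 2 ] * [ 1 0 -1 ]    dy = [  0 ] * [ 1 2 1 ]
//        [ 1 ]                      [ -1 ]
//
// i.e. dx is a vertical [1 2 1] smooth of the horizontal difference, and
// dy is a vertical difference of the horizontal [1 2 1] smooth.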

static inline void compute_flow_matrix(const int16_t *dx, int dx_stride,
                                       const int16_t *dy, int dy_stride,
                                       double *M) {
  __m128i acc[4] = { 0 };

  for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
    __m128i dx_row = _mm_loadu_si128((__m128i *)&dx[i * dx_stride]);
    __m128i dy_row = _mm_loadu_si128((__m128i *)&dy[i * dy_stride]);

    acc[0] = _mm_add_epi32(acc[0], _mm_madd_epi16(dx_row, dx_row));
    acc[1] = _mm_add_epi32(acc[1], _mm_madd_epi16(dx_row, dy_row));
    // Don't compute acc[2], as it should be equal to acc[1]
    acc[3] = _mm_add_epi32(acc[3], _mm_madd_epi16(dy_row, dy_row));
  }

  // Condense sums
  __m128i partial_sum_0 = _mm_hadd_epi32(acc[0], acc[1]);
  __m128i partial_sum_1 = _mm_hadd_epi32(acc[1], acc[3]);
  __m128i result = _mm_hadd_epi32(partial_sum_0, partial_sum_1);

  // Apply regularization
  // We follow the standard regularization method of adding `k * I` before
  // inverting. This ensures that the matrix will be invertible.
  //
  // Setting the regularization strength k to 1 seems to work well here, as
  // typical values coming from the other equations are very large (1e5 to
  // 1e6, with an upper limit of around 6e7, at the time of writing).
  // It also preserves the property that all matrix values are whole numbers,
  // which is convenient for integerized SIMD implementation.
  result = _mm_add_epi32(result, _mm_set_epi32(1, 0, 0, 1));
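  // Note: _mm_set_epi32 takes its arguments from the highest lane to the
  // lowest, so this adds 1 to lanes 0 and 3 of `result`, which hold
  // sum(dx * dx) and sum(dy * dy) respectively, i.e. the diagonal of M.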

  // Convert results to doubles and store
  _mm_storeu_pd(M, _mm_cvtepi32_pd(result));
  _mm_storeu_pd(M + 2, _mm_cvtepi32_pd(_mm_srli_si128(result, 8)));
}
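
// For reference, the matrix assembled by compute_flow_matrix() is the 2x2
// least-squares (structure tensor) matrix of the patch gradients, plus the
// regularization term described above:
//
//       [ sum(dx * dx)  sum(dx * dy) ]
//   M = [                            ] + I
//       [ sum(dx * dy)  sum(dy * dy) ]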

// Try to invert the matrix M
// Note: Due to the nature of how a least-squares matrix is constructed, all of
// the eigenvalues will be >= 0, and therefore det M >= 0 as well.
// The regularization term `+ k * I` further ensures that det M >= k^2.
// As mentioned in compute_flow_matrix(), here we use k = 1, so det M >= 1.
// So we don't have to worry about non-invertible matrices here.
static inline void invert_2x2(const double *M, double *M_inv) {
  double det = (M[0] * M[3]) - (M[1] * M[2]);
  assert(det >= 1);
  const double det_inv = 1 / det;

  M_inv[0] = M[3] * det_inv;
  M_inv[1] = -M[1] * det_inv;
  M_inv[2] = -M[2] * det_inv;
  M_inv[3] = M[0] * det_inv;
}

void aom_compute_flow_at_point_sse4_1(const uint8_t *src, const uint8_t *ref,
                                      int x, int y, int width, int height,
                                      int stride, double *u, double *v) {
  DECLARE_ALIGNED(16, double, M[4]);
  DECLARE_ALIGNED(16, double, M_inv[4]);
  DECLARE_ALIGNED(16, int16_t, dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
  DECLARE_ALIGNED(16, int16_t, dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
  int b[2];

  // Compute gradients within this patch
  const uint8_t *src_patch = &src[y * stride + x];
  sobel_filter(src_patch, stride, dx, dy);

  compute_flow_matrix(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, M);
  invert_2x2(M, M_inv);

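  // Each iteration solves the 2x2 linear system M * step = b (a
  // Gauss-Newton style update for the patch-matching least-squares problem),
  // then moves (u, v) by a damped, clamped version of that step.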
  for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) {
    compute_flow_vector(src, ref, width, height, stride, x, y, *u, *v, dx, dy,
                        b);

    // Solve flow equations to find a better estimate for the flow vector
    // at this point
    const double step_u = M_inv[0] * b[0] + M_inv[1] * b[1];
    const double step_v = M_inv[2] * b[0] + M_inv[3] * b[1];
    *u += fclamp(step_u * DISFLOW_STEP_SIZE, -2, 2);
    *v += fclamp(step_v * DISFLOW_STEP_SIZE, -2, 2);

    if (fabs(step_u) + fabs(step_v) < DISFLOW_STEP_SIZE_THRESOLD) {
      // Stop iteration when we're close to convergence
      break;
    }
  }
}