tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

sum_squares_sse2.c (17406B)


/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <emmintrin.h>
#include <stdio.h>

#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/x86/sum_squares_sse2.h"
#include "config/aom_dsp_rtcd.h"

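// Load 64 bits from b into the high half of a; the low half of a is
// preserved (via _mm_loadh_pd on the double-cast register).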
static inline __m128i xx_loadh_64(__m128i a, const void *b) {
  const __m128d ad = _mm_castsi128_pd(a);
  return _mm_castpd_si128(_mm_loadh_pd(ad, (double *)b));
}

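// Extract the low 64 bits of a as a uint64_t. _mm_cvtsi128_si64 is only
// available on x86-64 targets, so 32-bit builds spill through memory instead.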
static inline uint64_t xx_cvtsi128_si64(__m128i a) {
#if AOM_ARCH_X86_64
  return (uint64_t)_mm_cvtsi128_si64(a);
#else
  {
    uint64_t tmp;
    _mm_storel_epi64((__m128i *)&tmp, a);
    return tmp;
  }
#endif
}

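// Sum of squares of a 4x4 block of int16 values, returned as four 32-bit
// lane partial sums. Two 4-sample rows are packed per register, and
// _mm_madd_epi16(v, v) squares each 16-bit element and adds adjacent pairs
// into 32-bit lanes.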
static inline __m128i sum_squares_i16_4x4_sse2(const int16_t *src, int stride) {
  const __m128i v_val_0_w = xx_loadl_64(src + 0 * stride);
  const __m128i v_val_2_w = xx_loadl_64(src + 2 * stride);
  const __m128i v_val_01_w = xx_loadh_64(v_val_0_w, src + 1 * stride);
  const __m128i v_val_23_w = xx_loadh_64(v_val_2_w, src + 3 * stride);
  const __m128i v_sq_01_d = _mm_madd_epi16(v_val_01_w, v_val_01_w);
  const __m128i v_sq_23_d = _mm_madd_epi16(v_val_23_w, v_val_23_w);

  return _mm_add_epi32(v_sq_01_d, v_sq_23_d);
}

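// Fold the four 32-bit partial sums down to a single lane. Returning 32 bits
// assumes the block's sum of squares fits in a uint32, which the residual
// magnitudes this is called with are expected to satisfy.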
uint64_t aom_sum_squares_2d_i16_4x4_sse2(const int16_t *src, int stride) {
  const __m128i v_sum_0123_d = sum_squares_i16_4x4_sse2(src, stride);
  __m128i v_sum_d =
      _mm_add_epi32(v_sum_0123_d, _mm_srli_epi64(v_sum_0123_d, 32));
  v_sum_d = _mm_add_epi32(v_sum_d, _mm_srli_si128(v_sum_d, 8));
  return (uint64_t)_mm_cvtsi128_si32(v_sum_d);
}

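// Compute both the sum (written to *sum) and the sum of squares (returned)
// of a 4x4 block. _mm_madd_epi16 with a vector of ones performs a widening
// pairwise add of the packed 16-bit row sums.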
uint64_t aom_sum_sse_2d_i16_4x4_sse2(const int16_t *src, int stride, int *sum) {
  const __m128i one_reg = _mm_set1_epi16(1);
  const __m128i v_val_0_w = xx_loadl_64(src + 0 * stride);
  const __m128i v_val_2_w = xx_loadl_64(src + 2 * stride);
  __m128i v_val_01_w = xx_loadh_64(v_val_0_w, src + 1 * stride);
  __m128i v_val_23_w = xx_loadh_64(v_val_2_w, src + 3 * stride);

  __m128i v_sum_0123_d = _mm_add_epi16(v_val_01_w, v_val_23_w);
  v_sum_0123_d = _mm_madd_epi16(v_sum_0123_d, one_reg);
  v_sum_0123_d = _mm_add_epi32(v_sum_0123_d, _mm_srli_si128(v_sum_0123_d, 8));
  v_sum_0123_d = _mm_add_epi32(v_sum_0123_d, _mm_srli_si128(v_sum_0123_d, 4));
  *sum = _mm_cvtsi128_si32(v_sum_0123_d);

  const __m128i v_sq_01_d = _mm_madd_epi16(v_val_01_w, v_val_01_w);
  const __m128i v_sq_23_d = _mm_madd_epi16(v_val_23_w, v_val_23_w);
  __m128i v_sq_0123_d = _mm_add_epi32(v_sq_01_d, v_sq_23_d);
  v_sq_0123_d = _mm_add_epi32(v_sq_0123_d, _mm_srli_si128(v_sq_0123_d, 8));
  v_sq_0123_d = _mm_add_epi32(v_sq_0123_d, _mm_srli_si128(v_sq_0123_d, 4));
  return (uint64_t)_mm_cvtsi128_si32(v_sq_0123_d);
}

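// Sum of squares of a 4-wide, height-tall block (height a multiple of 4).
// Partial sums accumulate in four 32-bit lanes across the whole column and
// are zero-extended to 64 bits only for the final reduction.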
uint64_t aom_sum_squares_2d_i16_4xn_sse2(const int16_t *src, int stride,
                                         int height) {
  int r = 0;
  __m128i v_acc_q = _mm_setzero_si128();
  do {
    const __m128i v_acc_d = sum_squares_i16_4x4_sse2(src, stride);
    v_acc_q = _mm_add_epi32(v_acc_q, v_acc_d);
    src += stride << 2;
    r += 4;
  } while (r < height);
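  // _mm_set1_epi64x(~0u) is 0x00000000ffffffff in each 64-bit lane: the AND
  // keeps the even 32-bit lanes and the shift brings down the odd ones, both
  // zero-extended for the 64-bit adds.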
  const __m128i v_zext_mask_q = _mm_set1_epi64x(~0u);
  __m128i v_acc_64 = _mm_add_epi64(_mm_srli_epi64(v_acc_q, 32),
                                   _mm_and_si128(v_acc_q, v_zext_mask_q));
  v_acc_64 = _mm_add_epi64(v_acc_64, _mm_srli_si128(v_acc_64, 8));
  return xx_cvtsi128_si64(v_acc_64);
}

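// Sum and sum of squares of a 4-wide column, one 4x4 block at a time.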
uint64_t aom_sum_sse_2d_i16_4xn_sse2(const int16_t *src, int stride, int height,
                                     int *sum) {
  int r = 0;
  uint64_t sse = 0;
  do {
    int curr_sum = 0;
    sse += aom_sum_sse_2d_i16_4x4_sse2(src, stride, &curr_sum);
    *sum += curr_sum;
    src += stride << 2;
    r += 4;
  } while (r < height);
  return sse;
}

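// General width x height case (width a multiple of 8, height a multiple of
// 4). The inner loop walks 8 columns at a time across a band of four rows;
// each band's 32-bit partial sums are widened into the 64-bit accumulator
// before the next band starts.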
#ifdef __GNUC__
// This prevents GCC/Clang from inlining this function into
// aom_sum_squares_2d_i16_sse2, which in turn saves some stack
// maintenance instructions in the common case of 4x4.
__attribute__((noinline))
#endif
uint64_t
aom_sum_squares_2d_i16_nxn_sse2(const int16_t *src, int stride, int width,
                                int height) {
  int r = 0;

  const __m128i v_zext_mask_q = _mm_set1_epi64x(~0u);
  __m128i v_acc_q = _mm_setzero_si128();

  do {
    __m128i v_acc_d = _mm_setzero_si128();
    int c = 0;
    do {
      const int16_t *b = src + c;

      const __m128i v_val_0_w = xx_load_128(b + 0 * stride);
      const __m128i v_val_1_w = xx_load_128(b + 1 * stride);
      const __m128i v_val_2_w = xx_load_128(b + 2 * stride);
      const __m128i v_val_3_w = xx_load_128(b + 3 * stride);

      const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
      const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
      const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
      const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);

      const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
      const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);

      const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d);

      v_acc_d = _mm_add_epi32(v_acc_d, v_sum_0123_d);
      c += 8;
    } while (c < width);

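    // Zero-extend the band's even/odd 32-bit partial sums into the 64-bit
    // accumulator.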
    v_acc_q = _mm_add_epi64(v_acc_q, _mm_and_si128(v_acc_d, v_zext_mask_q));
    v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_epi64(v_acc_d, 32));

    src += 4 * stride;
    r += 4;
  } while (r < height);

  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8));
  return xx_cvtsi128_si64(v_acc_q);
}

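// As above, but also accumulating the plain sum: the row sums stay in 32-bit
// lanes for the whole block, while the squared sums are widened to 64 bits
// after every band of four rows.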
#ifdef __GNUC__
// This prevents GCC/Clang from inlining this function into
// aom_sum_sse_2d_i16_sse2, which in turn saves some stack
// maintenance instructions in the common case of 4x4.
__attribute__((noinline))
#endif
uint64_t
aom_sum_sse_2d_i16_nxn_sse2(const int16_t *src, int stride, int width,
                            int height, int *sum) {
  int r = 0;
  uint64_t result;
  const __m128i zero_reg = _mm_setzero_si128();
  const __m128i one_reg = _mm_set1_epi16(1);

  __m128i v_sse_total = zero_reg;
  __m128i v_sum_total = zero_reg;

  do {
    int c = 0;
    __m128i v_sse_row = zero_reg;
    do {
      const int16_t *b = src + c;

      __m128i v_val_0_w = xx_load_128(b + 0 * stride);
      __m128i v_val_1_w = xx_load_128(b + 1 * stride);
      __m128i v_val_2_w = xx_load_128(b + 2 * stride);
      __m128i v_val_3_w = xx_load_128(b + 3 * stride);

      const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
      const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
      const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
      const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);
      const __m128i v_sq_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
      const __m128i v_sq_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
      const __m128i v_sq_0123_d = _mm_add_epi32(v_sq_01_d, v_sq_23_d);
      v_sse_row = _mm_add_epi32(v_sse_row, v_sq_0123_d);

      const __m128i v_sum_01 = _mm_add_epi16(v_val_0_w, v_val_1_w);
      const __m128i v_sum_23 = _mm_add_epi16(v_val_2_w, v_val_3_w);
      __m128i v_sum_0123_d = _mm_add_epi16(v_sum_01, v_sum_23);
      v_sum_0123_d = _mm_madd_epi16(v_sum_0123_d, one_reg);
      v_sum_total = _mm_add_epi32(v_sum_total, v_sum_0123_d);

      c += 8;
    } while (c < width);

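    // Widen the band's 32-bit squared sums to 64 bits by interleaving with
    // zero, then fold them into the running total.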
    const __m128i v_sse_row_low = _mm_unpacklo_epi32(v_sse_row, zero_reg);
    const __m128i v_sse_row_hi = _mm_unpackhi_epi32(v_sse_row, zero_reg);
    v_sse_row = _mm_add_epi64(v_sse_row_low, v_sse_row_hi);
    v_sse_total = _mm_add_epi64(v_sse_total, v_sse_row);
    src += 4 * stride;
    r += 4;
  } while (r < height);

  v_sum_total = _mm_add_epi32(v_sum_total, _mm_srli_si128(v_sum_total, 8));
  v_sum_total = _mm_add_epi32(v_sum_total, _mm_srli_si128(v_sum_total, 4));
  *sum += _mm_cvtsi128_si32(v_sum_total);

  v_sse_total = _mm_add_epi64(v_sse_total, _mm_srli_si128(v_sse_total, 8));
  xx_storel_64(&result, v_sse_total);
  return result;
}

uint64_t aom_sum_squares_2d_i16_sse2(const int16_t *src, int stride, int width,
                                     int height) {
  // 4 elements per row only requires half an XMM register, so this
  // must be a special case, but also note that over 75% of all calls
  // are with size == 4, so it is also the common case.
  if (LIKELY(width == 4 && height == 4)) {
    return aom_sum_squares_2d_i16_4x4_sse2(src, stride);
  } else if (LIKELY(width == 4 && (height & 3) == 0)) {
    return aom_sum_squares_2d_i16_4xn_sse2(src, stride, height);
  } else if (LIKELY((width & 7) == 0 && (height & 3) == 0)) {
    // Generic case
    return aom_sum_squares_2d_i16_nxn_sse2(src, stride, width, height);
  } else {
    return aom_sum_squares_2d_i16_c(src, stride, width, height);
  }
}

uint64_t aom_sum_sse_2d_i16_sse2(const int16_t *src, int src_stride, int width,
                                 int height, int *sum) {
  if (LIKELY(width == 4 && height == 4)) {
    return aom_sum_sse_2d_i16_4x4_sse2(src, src_stride, sum);
  } else if (LIKELY(width == 4 && (height & 3) == 0)) {
    return aom_sum_sse_2d_i16_4xn_sse2(src, src_stride, height, sum);
  } else if (LIKELY((width & 7) == 0 && (height & 3) == 0)) {
    // Generic case
    return aom_sum_sse_2d_i16_nxn_sse2(src, src_stride, width, height, sum);
  } else {
    return aom_sum_sse_2d_i16_c(src, src_stride, width, height, sum);
  }
}

//////////////////////////////////////////////////////////////////////////////
// 1D version
//////////////////////////////////////////////////////////////////////////////

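// 1D sum of squares over a multiple of 64 elements. Two 64-bit accumulators
// are kept live so the adds from consecutive iterations are independent.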
static uint64_t aom_sum_squares_i16_64n_sse2(const int16_t *src, uint32_t n) {
  const __m128i v_zext_mask_q = _mm_set1_epi64x(~0u);
  __m128i v_acc0_q = _mm_setzero_si128();
  __m128i v_acc1_q = _mm_setzero_si128();

  const int16_t *const end = src + n;

  assert(n % 64 == 0);

  while (src < end) {
    const __m128i v_val_0_w = xx_load_128(src);
    const __m128i v_val_1_w = xx_load_128(src + 8);
    const __m128i v_val_2_w = xx_load_128(src + 16);
    const __m128i v_val_3_w = xx_load_128(src + 24);
    const __m128i v_val_4_w = xx_load_128(src + 32);
    const __m128i v_val_5_w = xx_load_128(src + 40);
    const __m128i v_val_6_w = xx_load_128(src + 48);
    const __m128i v_val_7_w = xx_load_128(src + 56);

    const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
    const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
    const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
    const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);
    const __m128i v_sq_4_d = _mm_madd_epi16(v_val_4_w, v_val_4_w);
    const __m128i v_sq_5_d = _mm_madd_epi16(v_val_5_w, v_val_5_w);
    const __m128i v_sq_6_d = _mm_madd_epi16(v_val_6_w, v_val_6_w);
    const __m128i v_sq_7_d = _mm_madd_epi16(v_val_7_w, v_val_7_w);

    const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
    const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
    const __m128i v_sum_45_d = _mm_add_epi32(v_sq_4_d, v_sq_5_d);
    const __m128i v_sum_67_d = _mm_add_epi32(v_sq_6_d, v_sq_7_d);

    const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d);
    const __m128i v_sum_4567_d = _mm_add_epi32(v_sum_45_d, v_sum_67_d);

    const __m128i v_sum_d = _mm_add_epi32(v_sum_0123_d, v_sum_4567_d);

    v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_and_si128(v_sum_d, v_zext_mask_q));
    v_acc1_q = _mm_add_epi64(v_acc1_q, _mm_srli_epi64(v_sum_d, 32));

    src += 64;
  }

  v_acc0_q = _mm_add_epi64(v_acc0_q, v_acc1_q);
  v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_srli_si128(v_acc0_q, 8));
  return xx_cvtsi128_si64(v_acc0_q);
}

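// Arbitrary n: run the largest multiple-of-64 prefix through the SIMD kernel
// and finish the tail in C.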
uint64_t aom_sum_squares_i16_sse2(const int16_t *src, uint32_t n) {
  if (n % 64 == 0) {
    return aom_sum_squares_i16_64n_sse2(src, n);
  } else if (n > 64) {
    const uint32_t k = n & ~63u;
    return aom_sum_squares_i16_64n_sse2(src, k) +
           aom_sum_squares_i16_c(src + k, n - k);
  } else {
    return aom_sum_squares_i16_c(src, n);
  }
}

// Accumulate sum of 16-bit elements in the vector
static inline int32_t mm_accumulate_epi16(__m128i vec_a) {
  __m128i vtmp = _mm_srli_si128(vec_a, 8);
  vec_a = _mm_add_epi16(vec_a, vtmp);
  vtmp = _mm_srli_si128(vec_a, 4);
  vec_a = _mm_add_epi16(vec_a, vtmp);
  vtmp = _mm_srli_si128(vec_a, 2);
  vec_a = _mm_add_epi16(vec_a, vtmp);
  return _mm_extract_epi16(vec_a, 0);
}

// Accumulate sum of 32-bit elements in the vector
static inline int32_t mm_accumulate_epi32(__m128i vec_a) {
  __m128i vtmp = _mm_srli_si128(vec_a, 8);
  vec_a = _mm_add_epi32(vec_a, vtmp);
  vtmp = _mm_srli_si128(vec_a, 4);
  vec_a = _mm_add_epi32(vec_a, vtmp);
  return _mm_cvtsi128_si32(vec_a);
}

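// Returns the sum of squared deviations (pixel count times the variance) of
// a width x height block of 8-bit samples, 16 columns at a time. The 16-bit
// sum accumulator is flushed every 8 rows, before it can wrap.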
uint64_t aom_var_2d_u8_sse2(uint8_t *src, int src_stride, int width,
                            int height) {
  uint8_t *srcp;
  uint64_t s = 0, ss = 0;
  __m128i vzero = _mm_setzero_si128();
  __m128i v_acc_sum = vzero;
  __m128i v_acc_sqs = vzero;
  int i, j;

  // Process 16 columns at a time
  for (i = 0; i < width - 15; i += 16) {
    srcp = src + i;
    // Process 8 rows at a time
    for (j = 0; j < height - 7; j += 8) {
      __m128i vsrc[8];
      for (int k = 0; k < 8; k++) {
        vsrc[k] = _mm_loadu_si128((__m128i *)srcp);
        srcp += src_stride;
      }
      for (int k = 0; k < 8; k++) {
        __m128i vsrc0 = _mm_unpacklo_epi8(vsrc[k], vzero);
        __m128i vsrc1 = _mm_unpackhi_epi8(vsrc[k], vzero);
        v_acc_sum = _mm_add_epi16(v_acc_sum, vsrc0);
        v_acc_sum = _mm_add_epi16(v_acc_sum, vsrc1);

        __m128i vsqs0 = _mm_madd_epi16(vsrc0, vsrc0);
        __m128i vsqs1 = _mm_madd_epi16(vsrc1, vsrc1);
        v_acc_sqs = _mm_add_epi32(v_acc_sqs, vsqs0);
        v_acc_sqs = _mm_add_epi32(v_acc_sqs, vsqs1);
      }

      // Update total sum and clear the vectors
      s += mm_accumulate_epi16(v_acc_sum);
      ss += mm_accumulate_epi32(v_acc_sqs);
      v_acc_sum = vzero;
      v_acc_sqs = vzero;
    }

    // Process remaining rows (height not a multiple of 8)
    for (; j < height; j++) {
      __m128i vsrc = _mm_loadu_si128((__m128i *)srcp);
      __m128i vsrc0 = _mm_unpacklo_epi8(vsrc, vzero);
      __m128i vsrc1 = _mm_unpackhi_epi8(vsrc, vzero);
      v_acc_sum = _mm_add_epi16(v_acc_sum, vsrc0);
      v_acc_sum = _mm_add_epi16(v_acc_sum, vsrc1);

      __m128i vsqs0 = _mm_madd_epi16(vsrc0, vsrc0);
      __m128i vsqs1 = _mm_madd_epi16(vsrc1, vsrc1);
      v_acc_sqs = _mm_add_epi32(v_acc_sqs, vsqs0);
      v_acc_sqs = _mm_add_epi32(v_acc_sqs, vsqs1);

      srcp += src_stride;
    }

    // Update total sum and clear the vectors
    s += mm_accumulate_epi16(v_acc_sum);
    ss += mm_accumulate_epi32(v_acc_sqs);
    v_acc_sum = vzero;
    v_acc_sqs = vzero;
  }

  // Process the remaining area using C
  srcp = src;
  for (int k = 0; k < height; k++) {
    for (int m = i; m < width; m++) {
      uint8_t val = srcp[m];
      s += val;
      ss += val * val;
    }
    srcp += src_stride;
  }
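  // ss - s*s/N is the sum of squared deviations from the mean, i.e. N times
  // the variance, computed with integer division.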
  return (ss - s * s / (width * height));
}

#if CONFIG_AV1_HIGHBITDEPTH
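// High-bit-depth variant: src actually points at uint16_t samples (hence
// CONVERT_TO_SHORTPTR). _mm_madd_epi16 on the raw samples still squares
// correctly because AV1 pixel values are at most 12 bits.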
uint64_t aom_var_2d_u16_sse2(uint8_t *src, int src_stride, int width,
                             int height) {
  uint16_t *srcp1 = CONVERT_TO_SHORTPTR(src), *srcp;
  uint64_t s = 0, ss = 0;
  __m128i vzero = _mm_setzero_si128();
  __m128i v_acc_sum = vzero;
  __m128i v_acc_sqs = vzero;
  int i, j;

  // Process 8 columns at a time
  for (i = 0; i < width - 8; i += 8) {
    srcp = srcp1 + i;
    // Process 8 rows at a time
    for (j = 0; j < height - 8; j += 8) {
      __m128i vsrc[8];
      for (int k = 0; k < 8; k++) {
        vsrc[k] = _mm_loadu_si128((__m128i *)srcp);
        srcp += src_stride;
      }
      for (int k = 0; k < 8; k++) {
        __m128i vsrc0 = _mm_unpacklo_epi16(vsrc[k], vzero);
        __m128i vsrc1 = _mm_unpackhi_epi16(vsrc[k], vzero);
        v_acc_sum = _mm_add_epi32(vsrc0, v_acc_sum);
        v_acc_sum = _mm_add_epi32(vsrc1, v_acc_sum);

        __m128i vsqs0 = _mm_madd_epi16(vsrc[k], vsrc[k]);
        v_acc_sqs = _mm_add_epi32(v_acc_sqs, vsqs0);
      }

      // Update total sum and clear the vectors
      s += mm_accumulate_epi32(v_acc_sum);
      ss += mm_accumulate_epi32(v_acc_sqs);
      v_acc_sum = vzero;
      v_acc_sqs = vzero;
    }

    // Process remaining rows (height not a multiple of 8)
    for (; j < height; j++) {
      __m128i vsrc = _mm_loadu_si128((__m128i *)srcp);
      __m128i vsrc0 = _mm_unpacklo_epi16(vsrc, vzero);
      __m128i vsrc1 = _mm_unpackhi_epi16(vsrc, vzero);
      v_acc_sum = _mm_add_epi32(vsrc0, v_acc_sum);
      v_acc_sum = _mm_add_epi32(vsrc1, v_acc_sum);

      __m128i vsqs0 = _mm_madd_epi16(vsrc, vsrc);
      v_acc_sqs = _mm_add_epi32(v_acc_sqs, vsqs0);
      srcp += src_stride;
    }

    // Update total sum and clear the vectors
    s += mm_accumulate_epi32(v_acc_sum);
    ss += mm_accumulate_epi32(v_acc_sqs);
    v_acc_sum = vzero;
    v_acc_sqs = vzero;
  }

  // Process the remaining area using C
  srcp = srcp1;
  for (int k = 0; k < height; k++) {
    for (int m = i; m < width; m++) {
      uint16_t val = srcp[m];
      s += val;
      ss += val * val;
    }
    srcp += src_stride;
  }
  return (ss - s * s / (width * height));
}
#endif  // CONFIG_AV1_HIGHBITDEPTH