tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

avg_intrin_sse2.c (26055B)


/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <assert.h>
#include <immintrin.h>

#include "config/aom_dsp_rtcd.h"
#include "aom/aom_integer.h"
#include "aom_dsp/x86/bitdepth_conversion_sse2.h"
#include "aom_dsp/x86/mem_sse2.h"
#include "aom_dsp/x86/synonyms.h"
#include "aom_ports/mem.h"

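// Sign-extend the eight 16-bit lanes of |in| into two vectors of four
// 32-bit lanes. |zero| must be _mm_setzero_si128(): comparing against it
// yields an all-ones mask for negative lanes, which is interleaved in as
// the high half of each widened lane.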
static inline void sign_extend_16bit_to_32bit_sse2(__m128i in, __m128i zero,
                                                   __m128i *out_lo,
                                                   __m128i *out_hi) {
  const __m128i sign_bits = _mm_cmplt_epi16(in, zero);
  *out_lo = _mm_unpacklo_epi16(in, sign_bits);
  *out_hi = _mm_unpackhi_epi16(in, sign_bits);
}

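// Conditionally negate the 32-bit lanes of |a|: computes (a ^ sign) - sign,
// which is abs(a) when |sign| is the arithmetic sign mask a >> 31.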
static inline __m128i invert_sign_32_sse2(__m128i a, __m128i sign) {
  a = _mm_xor_si128(a, sign);
  return _mm_sub_epi32(a, sign);
}

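// Minimum and maximum absolute difference |s - d| over an 8x8 block. Each
// row is widened to 16 bits, abs(diff) is formed as max(diff, -diff), and
// the per-lane min/max accumulators are reduced horizontally at the end.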
void aom_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp,
                         int *min, int *max) {
  __m128i u0, s0, d0, diff, maxabsdiff, minabsdiff, negdiff, absdiff0, absdiff;
  u0 = _mm_setzero_si128();
  // Row 0
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff0 = _mm_max_epi16(diff, negdiff);
  // Row 1
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(absdiff0, absdiff);
  minabsdiff = _mm_min_epi16(absdiff0, absdiff);
  // Row 2
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 2 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 3
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 3 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 4
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 4 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 4 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 5
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 5 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 5 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 6
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 6 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 6 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);
  // Row 7
  s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 7 * p)), u0);
  d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d + 7 * dp)), u0);
  diff = _mm_subs_epi16(s0, d0);
  negdiff = _mm_subs_epi16(u0, diff);
  absdiff = _mm_max_epi16(diff, negdiff);
  maxabsdiff = _mm_max_epi16(maxabsdiff, absdiff);
  minabsdiff = _mm_min_epi16(minabsdiff, absdiff);

  maxabsdiff = _mm_max_epi16(maxabsdiff, _mm_srli_si128(maxabsdiff, 8));
  maxabsdiff = _mm_max_epi16(maxabsdiff, _mm_srli_epi64(maxabsdiff, 32));
  maxabsdiff = _mm_max_epi16(maxabsdiff, _mm_srli_epi64(maxabsdiff, 16));
  *max = _mm_extract_epi16(maxabsdiff, 0);

  minabsdiff = _mm_min_epi16(minabsdiff, _mm_srli_si128(minabsdiff, 8));
  minabsdiff = _mm_min_epi16(minabsdiff, _mm_srli_epi64(minabsdiff, 32));
  minabsdiff = _mm_min_epi16(minabsdiff, _mm_srli_epi64(minabsdiff, 16));
  *min = _mm_extract_epi16(minabsdiff, 0);
}

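// Average of an 8x8 block, rounded to nearest: rows are summed against zero
// with _mm_sad_epu8 and the result is (sum + 32) >> 6.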
unsigned int aom_avg_8x8_sse2(const uint8_t *s, int p) {
  __m128i sum0, sum1, s0, s1, s2, s3, u0;
  unsigned int avg = 0;
  u0 = _mm_setzero_si128();
  s0 = loadh_epi64((const __m128i *)(s + p),
                   _mm_loadl_epi64((const __m128i *)(s)));
  s1 = loadh_epi64((const __m128i *)(s + 3 * p),
                   _mm_loadl_epi64((const __m128i *)(s + 2 * p)));
  s2 = loadh_epi64((const __m128i *)(s + 5 * p),
                   _mm_loadl_epi64((const __m128i *)(s + 4 * p)));
  s3 = loadh_epi64((const __m128i *)(s + 7 * p),
                   _mm_loadl_epi64((const __m128i *)(s + 6 * p)));
  s0 = _mm_sad_epu8(s0, u0);
  s1 = _mm_sad_epu8(s1, u0);
  s2 = _mm_sad_epu8(s2, u0);
  s3 = _mm_sad_epu8(s3, u0);

  sum0 = _mm_add_epi16(s0, s1);
  sum1 = _mm_add_epi16(s2, s3);
  sum0 = _mm_add_epi16(sum0, sum1);
  sum0 = _mm_add_epi16(sum0, _mm_srli_si128(sum0, 8));
  avg = _mm_cvtsi128_si32(sum0);
  return (avg + 32) >> 6;
}

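// Averages of two horizontally adjacent 8x8 blocks in one pass: each 16-byte
// load spans both blocks and _mm_sad_epu8 keeps a separate partial sum per
// 64-bit half, so the left block's average falls out of the low half and the
// right block's out of the high half.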
static void calc_avg_8x8_dual_sse2(const uint8_t *s, int p, int *avg) {
  __m128i sum0, sum1, s0, s1, s2, s3, u0;
  u0 = _mm_setzero_si128();
  s0 = _mm_sad_epu8(_mm_loadu_si128((const __m128i *)(s)), u0);
  s1 = _mm_sad_epu8(_mm_loadu_si128((const __m128i *)(s + p)), u0);
  s2 = _mm_sad_epu8(_mm_loadu_si128((const __m128i *)(s + 2 * p)), u0);
  s3 = _mm_sad_epu8(_mm_loadu_si128((const __m128i *)(s + 3 * p)), u0);
  sum0 = _mm_add_epi16(s0, s1);
  sum1 = _mm_add_epi16(s2, s3);
  s0 = _mm_sad_epu8(_mm_loadu_si128((const __m128i *)(s + 4 * p)), u0);
  s1 = _mm_sad_epu8(_mm_loadu_si128((const __m128i *)(s + 5 * p)), u0);
  s2 = _mm_sad_epu8(_mm_loadu_si128((const __m128i *)(s + 6 * p)), u0);
  s3 = _mm_sad_epu8(_mm_loadu_si128((const __m128i *)(s + 7 * p)), u0);
  sum0 = _mm_add_epi16(sum0, _mm_add_epi16(s0, s1));
  sum1 = _mm_add_epi16(sum1, _mm_add_epi16(s2, s3));
  sum0 = _mm_add_epi16(sum0, sum1);

  // (avg + 32) >> 6
  __m128i rounding = _mm_set1_epi32(32);
  sum0 = _mm_add_epi32(sum0, rounding);
  sum0 = _mm_srli_epi32(sum0, 6);
  avg[0] = _mm_cvtsi128_si32(sum0);
  avg[1] = _mm_extract_epi16(sum0, 4);
}

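// Averages of the four 8x8 sub-blocks of the 16x16 block whose top-left
// corner is at (x16_idx, y16_idx) in |s|, written to avg[0..3] in raster
// order.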
void aom_avg_8x8_quad_sse2(const uint8_t *s, int p, int x16_idx, int y16_idx,
                           int *avg) {
  const uint8_t *s_ptr = s + y16_idx * p + x16_idx;
  for (int k = 0; k < 2; k++) {
    calc_avg_8x8_dual_sse2(s_ptr, p, avg + k * 2);
    s_ptr += 8 * p;
  }
}

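// Average of a 4x4 block, rounded to nearest: (sum + 8) >> 4.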
unsigned int aom_avg_4x4_sse2(const uint8_t *s, int p) {
  __m128i s0, s1, u0;
  unsigned int avg = 0;
  u0 = _mm_setzero_si128();
  s0 = _mm_unpacklo_epi32(xx_loadl_32(s), xx_loadl_32(s + p));
  s1 = _mm_unpacklo_epi32(xx_loadl_32(s + p * 2), xx_loadl_32(s + p * 3));
  s0 = _mm_sad_epu8(s0, u0);
  s1 = _mm_sad_epu8(s1, u0);
  s0 = _mm_add_epi16(s0, s1);
  avg = _mm_cvtsi128_si32(s0);
  return (avg + 8) >> 4;
}

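// One butterfly pass of a 4-point Hadamard transform on four 8-lane vectors,
// with a >> 1 to keep intermediates within 16 bits. When iter == 0 the four
// 4x16-bit rows (held in the low 64 bits of each register) are also
// transposed so the second pass processes the other dimension.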
static inline void hadamard_col4_sse2(__m128i *in, int iter) {
  const __m128i a0 = in[0];
  const __m128i a1 = in[1];
  const __m128i a2 = in[2];
  const __m128i a3 = in[3];
  const __m128i b0 = _mm_srai_epi16(_mm_add_epi16(a0, a1), 1);
  const __m128i b1 = _mm_srai_epi16(_mm_sub_epi16(a0, a1), 1);
  const __m128i b2 = _mm_srai_epi16(_mm_add_epi16(a2, a3), 1);
  const __m128i b3 = _mm_srai_epi16(_mm_sub_epi16(a2, a3), 1);
  in[0] = _mm_add_epi16(b0, b2);
  in[1] = _mm_add_epi16(b1, b3);
  in[2] = _mm_sub_epi16(b0, b2);
  in[3] = _mm_sub_epi16(b1, b3);

  if (iter == 0) {
    const __m128i ba = _mm_unpacklo_epi16(in[0], in[1]);
    const __m128i dc = _mm_unpacklo_epi16(in[2], in[3]);
    const __m128i dcba_lo = _mm_unpacklo_epi32(ba, dc);
    const __m128i dcba_hi = _mm_unpackhi_epi32(ba, dc);
    in[0] = dcba_lo;
    in[1] = _mm_srli_si128(dcba_lo, 8);
    in[2] = dcba_hi;
    in[3] = _mm_srli_si128(dcba_hi, 8);
  }
}

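// 4x4 Hadamard transform: a column pass (which also transposes), then a row
// pass, then the results are packed two rows per store.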
void aom_hadamard_4x4_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                           tran_low_t *coeff) {
  __m128i src[4];
  src[0] = _mm_loadl_epi64((const __m128i *)src_diff);
  src[1] = _mm_loadl_epi64((const __m128i *)(src_diff += src_stride));
  src[2] = _mm_loadl_epi64((const __m128i *)(src_diff += src_stride));
  src[3] = _mm_loadl_epi64((const __m128i *)(src_diff + src_stride));

  hadamard_col4_sse2(src, 0);
  hadamard_col4_sse2(src, 1);

  store_tran_low(_mm_unpacklo_epi64(src[0], src[1]), coeff);
  coeff += 8;
  store_tran_low(_mm_unpacklo_epi64(src[2], src[3]), coeff);
}

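// One butterfly pass of an 8-point Hadamard transform on eight 8-lane
// vectors. When iter == 0 the final butterfly stage is fused with an 8x8
// 16-bit transpose so the second pass operates on the transposed data; when
// iter == 1 the results are written back directly.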
static inline void hadamard_col8_sse2(__m128i *in, int iter) {
  __m128i a0 = in[0];
  __m128i a1 = in[1];
  __m128i a2 = in[2];
  __m128i a3 = in[3];
  __m128i a4 = in[4];
  __m128i a5 = in[5];
  __m128i a6 = in[6];
  __m128i a7 = in[7];

  __m128i b0 = _mm_add_epi16(a0, a1);
  __m128i b1 = _mm_sub_epi16(a0, a1);
  __m128i b2 = _mm_add_epi16(a2, a3);
  __m128i b3 = _mm_sub_epi16(a2, a3);
  __m128i b4 = _mm_add_epi16(a4, a5);
  __m128i b5 = _mm_sub_epi16(a4, a5);
  __m128i b6 = _mm_add_epi16(a6, a7);
  __m128i b7 = _mm_sub_epi16(a6, a7);

  a0 = _mm_add_epi16(b0, b2);
  a1 = _mm_add_epi16(b1, b3);
  a2 = _mm_sub_epi16(b0, b2);
  a3 = _mm_sub_epi16(b1, b3);
  a4 = _mm_add_epi16(b4, b6);
  a5 = _mm_add_epi16(b5, b7);
  a6 = _mm_sub_epi16(b4, b6);
  a7 = _mm_sub_epi16(b5, b7);

  if (iter == 0) {
    b0 = _mm_add_epi16(a0, a4);
    b7 = _mm_add_epi16(a1, a5);
    b3 = _mm_add_epi16(a2, a6);
    b4 = _mm_add_epi16(a3, a7);
    b2 = _mm_sub_epi16(a0, a4);
    b6 = _mm_sub_epi16(a1, a5);
    b1 = _mm_sub_epi16(a2, a6);
    b5 = _mm_sub_epi16(a3, a7);

    a0 = _mm_unpacklo_epi16(b0, b1);
    a1 = _mm_unpacklo_epi16(b2, b3);
    a2 = _mm_unpackhi_epi16(b0, b1);
    a3 = _mm_unpackhi_epi16(b2, b3);
    a4 = _mm_unpacklo_epi16(b4, b5);
    a5 = _mm_unpacklo_epi16(b6, b7);
    a6 = _mm_unpackhi_epi16(b4, b5);
    a7 = _mm_unpackhi_epi16(b6, b7);

    b0 = _mm_unpacklo_epi32(a0, a1);
    b1 = _mm_unpacklo_epi32(a4, a5);
    b2 = _mm_unpackhi_epi32(a0, a1);
    b3 = _mm_unpackhi_epi32(a4, a5);
    b4 = _mm_unpacklo_epi32(a2, a3);
    b5 = _mm_unpacklo_epi32(a6, a7);
    b6 = _mm_unpackhi_epi32(a2, a3);
    b7 = _mm_unpackhi_epi32(a6, a7);

    in[0] = _mm_unpacklo_epi64(b0, b1);
    in[1] = _mm_unpackhi_epi64(b0, b1);
    in[2] = _mm_unpacklo_epi64(b2, b3);
    in[3] = _mm_unpackhi_epi64(b2, b3);
    in[4] = _mm_unpacklo_epi64(b4, b5);
    in[5] = _mm_unpackhi_epi64(b4, b5);
    in[6] = _mm_unpacklo_epi64(b6, b7);
    in[7] = _mm_unpackhi_epi64(b6, b7);
  } else {
    in[0] = _mm_add_epi16(a0, a4);
    in[7] = _mm_add_epi16(a1, a5);
    in[3] = _mm_add_epi16(a2, a6);
    in[4] = _mm_add_epi16(a3, a7);
    in[2] = _mm_sub_epi16(a0, a4);
    in[6] = _mm_sub_epi16(a1, a5);
    in[1] = _mm_sub_epi16(a2, a6);
    in[5] = _mm_sub_epi16(a3, a7);
  }
}

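// 8x8 Hadamard transform: column pass, then row pass. With is_final set the
// coefficients are widened and stored as tran_low_t via store_tran_low();
// otherwise they stay int16_t for a following 16x16/32x32 stage.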
static inline void hadamard_8x8_sse2(const int16_t *src_diff,
                                     ptrdiff_t src_stride, tran_low_t *coeff,
                                     int is_final) {
  __m128i src[8];
  src[0] = _mm_load_si128((const __m128i *)src_diff);
  src[1] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[2] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[3] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[4] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[5] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[6] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[7] = _mm_load_si128((const __m128i *)(src_diff + src_stride));

  hadamard_col8_sse2(src, 0);
  hadamard_col8_sse2(src, 1);

  if (is_final) {
    store_tran_low(src[0], coeff);
    coeff += 8;
    store_tran_low(src[1], coeff);
    coeff += 8;
    store_tran_low(src[2], coeff);
    coeff += 8;
    store_tran_low(src[3], coeff);
    coeff += 8;
    store_tran_low(src[4], coeff);
    coeff += 8;
    store_tran_low(src[5], coeff);
    coeff += 8;
    store_tran_low(src[6], coeff);
    coeff += 8;
    store_tran_low(src[7], coeff);
  } else {
    int16_t *coeff16 = (int16_t *)coeff;
    _mm_store_si128((__m128i *)coeff16, src[0]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[1]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[2]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[3]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[4]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[5]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[6]);
    coeff16 += 8;
    _mm_store_si128((__m128i *)coeff16, src[7]);
  }
}

void aom_hadamard_8x8_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                           tran_low_t *coeff) {
  hadamard_8x8_sse2(src_diff, src_stride, coeff, 1);
}

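// Low-precision variant of the 8x8 Hadamard transform: coefficients are kept
// as int16_t throughout.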
static inline void hadamard_lp_8x8_sse2(const int16_t *src_diff,
                                        ptrdiff_t src_stride, int16_t *coeff) {
  __m128i src[8];
  src[0] = _mm_load_si128((const __m128i *)src_diff);
  src[1] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[2] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[3] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[4] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[5] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[6] = _mm_load_si128((const __m128i *)(src_diff += src_stride));
  src[7] = _mm_load_si128((const __m128i *)(src_diff + src_stride));

  hadamard_col8_sse2(src, 0);
  hadamard_col8_sse2(src, 1);

  _mm_store_si128((__m128i *)coeff, src[0]);
  coeff += 8;
  _mm_store_si128((__m128i *)coeff, src[1]);
  coeff += 8;
  _mm_store_si128((__m128i *)coeff, src[2]);
  coeff += 8;
  _mm_store_si128((__m128i *)coeff, src[3]);
  coeff += 8;
  _mm_store_si128((__m128i *)coeff, src[4]);
  coeff += 8;
  _mm_store_si128((__m128i *)coeff, src[5]);
  coeff += 8;
  _mm_store_si128((__m128i *)coeff, src[6]);
  coeff += 8;
  _mm_store_si128((__m128i *)coeff, src[7]);
}

void aom_hadamard_lp_8x8_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                              int16_t *coeff) {
  hadamard_lp_8x8_sse2(src_diff, src_stride, coeff);
}

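// Two independent low-precision 8x8 Hadamard transforms on horizontally
// adjacent blocks.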
void aom_hadamard_lp_8x8_dual_sse2(const int16_t *src_diff,
                                   ptrdiff_t src_stride, int16_t *coeff) {
  for (int i = 0; i < 2; i++) {
    hadamard_lp_8x8_sse2(src_diff + (i * 8), src_stride, coeff + (i * 64));
  }
}

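// Low-precision 16x16 Hadamard transform: four 8x8 transforms, then a
// cross-block butterfly stage over the four 64-coefficient groups with a
// >> 1 normalization.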
void aom_hadamard_lp_16x16_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                                int16_t *coeff) {
  for (int idx = 0; idx < 4; ++idx) {
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
    hadamard_lp_8x8_sse2(src_ptr, src_stride, coeff + idx * 64);
  }

  int16_t *t_coeff = coeff;
  for (int idx = 0; idx < 64; idx += 8) {
    __m128i coeff0 = _mm_load_si128((const __m128i *)t_coeff);
    __m128i coeff1 = _mm_load_si128((const __m128i *)(t_coeff + 64));
    __m128i coeff2 = _mm_load_si128((const __m128i *)(t_coeff + 128));
    __m128i coeff3 = _mm_load_si128((const __m128i *)(t_coeff + 192));

    __m128i b0 = _mm_add_epi16(coeff0, coeff1);
    __m128i b1 = _mm_sub_epi16(coeff0, coeff1);
    __m128i b2 = _mm_add_epi16(coeff2, coeff3);
    __m128i b3 = _mm_sub_epi16(coeff2, coeff3);

    b0 = _mm_srai_epi16(b0, 1);
    b1 = _mm_srai_epi16(b1, 1);
    b2 = _mm_srai_epi16(b2, 1);
    b3 = _mm_srai_epi16(b3, 1);

    coeff0 = _mm_add_epi16(b0, b2);
    coeff1 = _mm_add_epi16(b1, b3);
    coeff2 = _mm_sub_epi16(b0, b2);
    coeff3 = _mm_sub_epi16(b1, b3);

    _mm_store_si128((__m128i *)t_coeff, coeff0);
    _mm_store_si128((__m128i *)(t_coeff + 64), coeff1);
    _mm_store_si128((__m128i *)(t_coeff + 128), coeff2);
    _mm_store_si128((__m128i *)(t_coeff + 192), coeff3);

    t_coeff += 8;
  }
}

static inline void hadamard_16x16_sse2(const int16_t *src_diff,
                                       ptrdiff_t src_stride, tran_low_t *coeff,
                                       int is_final) {
  // For high bitdepths, it is unnecessary to store_tran_low
  // (mult/unpack/store), then load_tran_low (load/pack) the same memory in
  // the next stage. Output to an intermediate buffer first, then
  // store_tran_low() in the final stage.
  DECLARE_ALIGNED(32, int16_t, temp_coeff[16 * 16]);
  int16_t *t_coeff = temp_coeff;
  int16_t *coeff16 = (int16_t *)coeff;
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
    hadamard_8x8_sse2(src_ptr, src_stride, (tran_low_t *)(t_coeff + idx * 64),
                      0);
  }

  for (idx = 0; idx < 64; idx += 8) {
    __m128i coeff0 = _mm_load_si128((const __m128i *)t_coeff);
    __m128i coeff1 = _mm_load_si128((const __m128i *)(t_coeff + 64));
    __m128i coeff2 = _mm_load_si128((const __m128i *)(t_coeff + 128));
    __m128i coeff3 = _mm_load_si128((const __m128i *)(t_coeff + 192));

    __m128i b0 = _mm_add_epi16(coeff0, coeff1);
    __m128i b1 = _mm_sub_epi16(coeff0, coeff1);
    __m128i b2 = _mm_add_epi16(coeff2, coeff3);
    __m128i b3 = _mm_sub_epi16(coeff2, coeff3);

    b0 = _mm_srai_epi16(b0, 1);
    b1 = _mm_srai_epi16(b1, 1);
    b2 = _mm_srai_epi16(b2, 1);
    b3 = _mm_srai_epi16(b3, 1);

    coeff0 = _mm_add_epi16(b0, b2);
    coeff1 = _mm_add_epi16(b1, b3);
    coeff2 = _mm_sub_epi16(b0, b2);
    coeff3 = _mm_sub_epi16(b1, b3);

    if (is_final) {
      store_tran_low_offset_4(coeff0, coeff);
      store_tran_low_offset_4(coeff1, coeff + 64);
      store_tran_low_offset_4(coeff2, coeff + 128);
      store_tran_low_offset_4(coeff3, coeff + 192);
      coeff += 4;
    } else {
      _mm_store_si128((__m128i *)coeff16, coeff0);
      _mm_store_si128((__m128i *)(coeff16 + 64), coeff1);
      _mm_store_si128((__m128i *)(coeff16 + 128), coeff2);
      _mm_store_si128((__m128i *)(coeff16 + 192), coeff3);
      coeff16 += 8;
    }

    t_coeff += 8;
    // Increment coeff by an additional 0 or 8 on alternate iterations (rather
    // than a fixed 8) to stay coherent with the store pattern of
    // store_tran_low_offset_4().
    coeff += (((idx >> 3) & 1) << 3);
  }
}

void aom_hadamard_16x16_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                             tran_low_t *coeff) {
  hadamard_16x16_sse2(src_diff, src_stride, coeff, 1);
}

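// 32x32 Hadamard transform: four 16x16 transforms, then a cross-block
// butterfly stage. The butterflies are widened to 32 bits before the >> 2
// normalization so the intermediate sums cannot overflow 16 bits, then the
// results are packed back to 16 bits with saturation.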
void aom_hadamard_32x32_sse2(const int16_t *src_diff, ptrdiff_t src_stride,
                             tran_low_t *coeff) {
  // For high bitdepths, it is unnecessary to store_tran_low
  // (mult/unpack/store), then load_tran_low (load/pack) the same memory in
  // the next stage. Output to an intermediate buffer first, then
  // store_tran_low() in the final stage.
  DECLARE_ALIGNED(32, int16_t, temp_coeff[32 * 32]);
  int16_t *t_coeff = temp_coeff;
  int idx;
  __m128i coeff0_lo, coeff1_lo, coeff2_lo, coeff3_lo, b0_lo, b1_lo, b2_lo,
      b3_lo;
  __m128i coeff0_hi, coeff1_hi, coeff2_hi, coeff3_hi, b0_hi, b1_hi, b2_hi,
      b3_hi;
  __m128i b0, b1, b2, b3;
  const __m128i zero = _mm_setzero_si128();
  for (idx = 0; idx < 4; ++idx) {
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 16 * src_stride + (idx & 0x01) * 16;
    hadamard_16x16_sse2(src_ptr, src_stride,
                        (tran_low_t *)(t_coeff + idx * 256), 0);
  }

  for (idx = 0; idx < 256; idx += 8) {
    __m128i coeff0 = _mm_load_si128((const __m128i *)t_coeff);
    __m128i coeff1 = _mm_load_si128((const __m128i *)(t_coeff + 256));
    __m128i coeff2 = _mm_load_si128((const __m128i *)(t_coeff + 512));
    __m128i coeff3 = _mm_load_si128((const __m128i *)(t_coeff + 768));

    // Sign extend 16 bit to 32 bit.
    sign_extend_16bit_to_32bit_sse2(coeff0, zero, &coeff0_lo, &coeff0_hi);
    sign_extend_16bit_to_32bit_sse2(coeff1, zero, &coeff1_lo, &coeff1_hi);
    sign_extend_16bit_to_32bit_sse2(coeff2, zero, &coeff2_lo, &coeff2_hi);
    sign_extend_16bit_to_32bit_sse2(coeff3, zero, &coeff3_lo, &coeff3_hi);

    b0_lo = _mm_add_epi32(coeff0_lo, coeff1_lo);
    b0_hi = _mm_add_epi32(coeff0_hi, coeff1_hi);

    b1_lo = _mm_sub_epi32(coeff0_lo, coeff1_lo);
    b1_hi = _mm_sub_epi32(coeff0_hi, coeff1_hi);

    b2_lo = _mm_add_epi32(coeff2_lo, coeff3_lo);
    b2_hi = _mm_add_epi32(coeff2_hi, coeff3_hi);

    b3_lo = _mm_sub_epi32(coeff2_lo, coeff3_lo);
    b3_hi = _mm_sub_epi32(coeff2_hi, coeff3_hi);

    b0_lo = _mm_srai_epi32(b0_lo, 2);
    b1_lo = _mm_srai_epi32(b1_lo, 2);
    b2_lo = _mm_srai_epi32(b2_lo, 2);
    b3_lo = _mm_srai_epi32(b3_lo, 2);

    b0_hi = _mm_srai_epi32(b0_hi, 2);
    b1_hi = _mm_srai_epi32(b1_hi, 2);
    b2_hi = _mm_srai_epi32(b2_hi, 2);
    b3_hi = _mm_srai_epi32(b3_hi, 2);

    b0 = _mm_packs_epi32(b0_lo, b0_hi);
    b1 = _mm_packs_epi32(b1_lo, b1_hi);
    b2 = _mm_packs_epi32(b2_lo, b2_hi);
    b3 = _mm_packs_epi32(b3_lo, b3_hi);

    coeff0 = _mm_add_epi16(b0, b2);
    coeff1 = _mm_add_epi16(b1, b3);
    store_tran_low_offset_4(coeff0, coeff);
    store_tran_low_offset_4(coeff1, coeff + 256);

    coeff2 = _mm_sub_epi16(b0, b2);
    coeff3 = _mm_sub_epi16(b1, b3);
    store_tran_low_offset_4(coeff2, coeff + 512);
    store_tran_low_offset_4(coeff3, coeff + 768);

    // Increment coeff by 4 or 12 on alternate iterations (rather than a
    // fixed 8) to stay coherent with the store pattern of
    // store_tran_low_offset_4().
    coeff += (4 + (((idx >> 3) & 1) << 3));
    t_coeff += 8;
  }
}

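// Sum of absolute values of the 32-bit transform coefficients; |length| must
// be a multiple of 4.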
int aom_satd_sse2(const tran_low_t *coeff, int length) {
  int i;
  const __m128i zero = _mm_setzero_si128();
  __m128i accum = zero;

  for (i = 0; i < length; i += 4) {
    const __m128i src_line = _mm_load_si128((const __m128i *)coeff);
    const __m128i coeff_sign = _mm_srai_epi32(src_line, 31);
    const __m128i abs_coeff = invert_sign_32_sse2(src_line, coeff_sign);
    accum = _mm_add_epi32(accum, abs_coeff);
    coeff += 4;
  }

  {  // cascading summation of accum
    __m128i hi = _mm_srli_si128(accum, 8);
    accum = _mm_add_epi32(accum, hi);
    hi = _mm_srli_epi64(accum, 32);
    accum = _mm_add_epi32(accum, hi);
  }

  return _mm_cvtsi128_si32(accum);
}

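// Low-precision SATD on int16_t coefficients: abs via max(x, -x), then
// _mm_madd_epi16 against ones widens and pairwise-adds into 32-bit
// accumulators. |length| must be a multiple of 16.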
int aom_satd_lp_sse2(const int16_t *coeff, int length) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i one = _mm_set1_epi16(1);
  __m128i accum = zero;

  for (int i = 0; i < length; i += 16) {
    const __m128i src_line0 = _mm_loadu_si128((const __m128i *)coeff);
    const __m128i src_line1 = _mm_loadu_si128((const __m128i *)(coeff + 8));
    const __m128i inv0 = _mm_sub_epi16(zero, src_line0);
    const __m128i inv1 = _mm_sub_epi16(zero, src_line1);
    const __m128i abs0 = _mm_max_epi16(src_line0, inv0);  // abs(src_line)
    const __m128i abs1 = _mm_max_epi16(src_line1, inv1);  // abs(src_line)
    const __m128i sum0 = _mm_madd_epi16(abs0, one);
    const __m128i sum1 = _mm_madd_epi16(abs1, one);
    accum = _mm_add_epi32(accum, sum0);
    accum = _mm_add_epi32(accum, sum1);
    coeff += 16;
  }

  {  // cascading summation of accum
    __m128i hi = _mm_srli_si128(accum, 8);
    accum = _mm_add_epi32(accum, hi);
    hi = _mm_srli_epi64(accum, 32);
    accum = _mm_add_epi32(accum, hi);
  }

  return _mm_cvtsi128_si32(accum);
}

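// Vertical projection: hbuf[x] receives the sum of column x over |height|
// rows, shifted right by norm_factor.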
void aom_int_pro_row_sse2(int16_t *hbuf, const uint8_t *ref,
                          const int ref_stride, const int width,
                          const int height, int norm_factor) {
  // The SIMD implementation assumes width and height to be multiples of 16
  // and 2 respectively. For any odd width or height, SIMD support would need
  // to be added.
  assert(width % 16 == 0 && height % 2 == 0);
  __m128i zero = _mm_setzero_si128();

  for (int wd = 0; wd < width; wd += 16) {
    const uint8_t *ref_tmp = ref + wd;
    int16_t *hbuf_tmp = hbuf + wd;
    __m128i s0 = zero;
    __m128i s1 = zero;
    int idx = 0;
    do {
      __m128i src_line = _mm_loadu_si128((const __m128i *)ref_tmp);
      __m128i t0 = _mm_unpacklo_epi8(src_line, zero);
      __m128i t1 = _mm_unpackhi_epi8(src_line, zero);
      s0 = _mm_add_epi16(s0, t0);
      s1 = _mm_add_epi16(s1, t1);
      ref_tmp += ref_stride;

      src_line = _mm_loadu_si128((const __m128i *)ref_tmp);
      t0 = _mm_unpacklo_epi8(src_line, zero);
      t1 = _mm_unpackhi_epi8(src_line, zero);
      s0 = _mm_add_epi16(s0, t0);
      s1 = _mm_add_epi16(s1, t1);
      ref_tmp += ref_stride;
      idx += 2;
    } while (idx < height);

    s0 = _mm_srai_epi16(s0, norm_factor);
    s1 = _mm_srai_epi16(s1, norm_factor);
    _mm_storeu_si128((__m128i *)(hbuf_tmp), s0);
    _mm_storeu_si128((__m128i *)(hbuf_tmp + 8), s1);
  }
}

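// Horizontal projection: vbuf[y] receives the sum of row y over |width|
// pixels, shifted right by norm_factor.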
void aom_int_pro_col_sse2(int16_t *vbuf, const uint8_t *ref,
                          const int ref_stride, const int width,
                          const int height, int norm_factor) {
  // The SIMD implementation assumes width to be a multiple of 16.
  assert(width % 16 == 0);

  for (int ht = 0; ht < height; ht++) {
    const uint8_t *ref_tmp = ref + (ht * ref_stride);
    __m128i zero = _mm_setzero_si128();
    __m128i s0 = zero;
    __m128i s1, src_line;
    for (int i = 0; i < width; i += 16) {
      src_line = _mm_loadu_si128((const __m128i *)ref_tmp);
      s1 = _mm_sad_epu8(src_line, zero);
      s0 = _mm_add_epi16(s0, s1);
      ref_tmp += 16;
    }

    s1 = _mm_srli_si128(s0, 8);
    s0 = _mm_add_epi16(s0, s1);
    vbuf[ht] = _mm_cvtsi128_si32(s0) >> norm_factor;
  }
}