tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

dec_sse2.c


// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 version of some decoding functions (idct, loop filtering).
//
// Author: somnath@google.com (Somnath Banerjee)
//         cduvivier@google.com (Christian Duvivier)

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_SSE2)

// The 3-coeff sparse transform in SSE2 is not really faster than the plain-C
// one, it seems, so it is disabled by default. Set the following to 1 to
// enable it:
#if !defined(USE_TRANSFORM_AC3)
#define USE_TRANSFORM_AC3 0   // ALTERNATE_CODE
#endif

#include <emmintrin.h>

#include "src/dec/vp8i_dec.h"
#include "src/dsp/common_sse2.h"
#include "src/dsp/cpu.h"
#include "src/utils/utils.h"
#include "src/webp/types.h"

//------------------------------------------------------------------------------
// Transforms (Paragraph 14.4)

static void Transform_SSE2(const int16_t* WEBP_RESTRICT in,
                           uint8_t* WEBP_RESTRICT dst, int do_two) {
  // This implementation makes use of 16-bit fixed point versions of two
  // multiply constants:
  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
  //
  // To be able to use signed 16-bit integers, we use the following trick to
  // have constants within range:
  // - Associated constants are obtained by subtracting the 16-bit fixed point
  //   version of one:
  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
  //      K1 = 85627  =>  k1 =  20091
  //      K2 = 35468  =>  k2 = -30068
  // - The multiplication of a variable by a constant becomes the sum of the
  //   variable and the multiplication of that variable by the associated
  //   constant:
  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k) >> 16) + x
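  //   For example, with x = 1000: (1000 * 85627) >> 16 = 1306, and indeed
  //   ((1000 * 20091) >> 16) + 1000 = 306 + 1000 = 1306, which is what
  //   _mm_mulhi_epi16(x, k1) followed by an addition of x computes.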
  const __m128i k1 = _mm_set1_epi16(20091);
  const __m128i k2 = _mm_set1_epi16(-30068);
  __m128i T0, T1, T2, T3;

  // Load and concatenate the transform coefficients (we'll do two transforms
  // in parallel). In the case of only one transform, the second half of the
  // vectors will just contain random values we'll never use nor store.
  __m128i in0, in1, in2, in3;
  {
    in0 = _mm_loadl_epi64((const __m128i*)&in[0]);
    in1 = _mm_loadl_epi64((const __m128i*)&in[4]);
    in2 = _mm_loadl_epi64((const __m128i*)&in[8]);
    in3 = _mm_loadl_epi64((const __m128i*)&in[12]);
    // a00 a10 a20 a30   x x x x
    // a01 a11 a21 a31   x x x x
    // a02 a12 a22 a32   x x x x
    // a03 a13 a23 a33   x x x x
    if (do_two) {
      const __m128i inB0 = _mm_loadl_epi64((const __m128i*)&in[16]);
      const __m128i inB1 = _mm_loadl_epi64((const __m128i*)&in[20]);
      const __m128i inB2 = _mm_loadl_epi64((const __m128i*)&in[24]);
      const __m128i inB3 = _mm_loadl_epi64((const __m128i*)&in[28]);
      in0 = _mm_unpacklo_epi64(in0, inB0);
      in1 = _mm_unpacklo_epi64(in1, inB1);
      in2 = _mm_unpacklo_epi64(in2, inB2);
      in3 = _mm_unpacklo_epi64(in3, inB3);
      // a00 a10 a20 a30   b00 b10 b20 b30
      // a01 a11 a21 a31   b01 b11 b21 b31
      // a02 a12 a22 a32   b02 b12 b22 b32
      // a03 a13 a23 a33   b03 b13 b23 b33
    }
  }

  // Vertical pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i a = _mm_add_epi16(in0, in2);
    const __m128i b = _mm_sub_epi16(in0, in2);
    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
    const __m128i c3 = _mm_sub_epi16(in1, in3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
    const __m128i d3 = _mm_add_epi16(in1, in3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&tmp0, &tmp1, &tmp2, &tmp3, &T0, &T1, &T2, &T3);
  }

  // Horizontal pass and subsequent transpose.
  {
    // First pass, c and d calculations are longer because of the "trick"
    // multiplications.
    const __m128i four = _mm_set1_epi16(4);
    const __m128i dc = _mm_add_epi16(T0, four);
    const __m128i a =  _mm_add_epi16(dc, T2);
    const __m128i b =  _mm_sub_epi16(dc, T2);
    // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3
    const __m128i c1 = _mm_mulhi_epi16(T1, k2);
    const __m128i c2 = _mm_mulhi_epi16(T3, k1);
    const __m128i c3 = _mm_sub_epi16(T1, T3);
    const __m128i c4 = _mm_sub_epi16(c1, c2);
    const __m128i c = _mm_add_epi16(c3, c4);
    // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3
    const __m128i d1 = _mm_mulhi_epi16(T1, k1);
    const __m128i d2 = _mm_mulhi_epi16(T3, k2);
    const __m128i d3 = _mm_add_epi16(T1, T3);
    const __m128i d4 = _mm_add_epi16(d1, d2);
    const __m128i d = _mm_add_epi16(d3, d4);

    // Second pass.
    const __m128i tmp0 = _mm_add_epi16(a, d);
    const __m128i tmp1 = _mm_add_epi16(b, c);
    const __m128i tmp2 = _mm_sub_epi16(b, c);
    const __m128i tmp3 = _mm_sub_epi16(a, d);
    const __m128i shifted0 = _mm_srai_epi16(tmp0, 3);
    const __m128i shifted1 = _mm_srai_epi16(tmp1, 3);
    const __m128i shifted2 = _mm_srai_epi16(tmp2, 3);
    const __m128i shifted3 = _mm_srai_epi16(tmp3, 3);

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&shifted0, &shifted1, &shifted2, &shifted3, &T0, &T1,
                           &T2, &T3);
  }

  // Add inverse transform to 'dst' and store.
  {
    const __m128i zero = _mm_setzero_si128();
    // Load the reference(s).
    __m128i dst0, dst1, dst2, dst3;
    if (do_two) {
      // Load eight bytes/pixels per line.
      dst0 = _mm_loadl_epi64((__m128i*)(dst + 0 * BPS));
      dst1 = _mm_loadl_epi64((__m128i*)(dst + 1 * BPS));
      dst2 = _mm_loadl_epi64((__m128i*)(dst + 2 * BPS));
      dst3 = _mm_loadl_epi64((__m128i*)(dst + 3 * BPS));
    } else {
      // Load four bytes/pixels per line.
      dst0 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 0 * BPS));
      dst1 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 1 * BPS));
      dst2 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 2 * BPS));
      dst3 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 3 * BPS));
    }
    // Convert to 16b.
    dst0 = _mm_unpacklo_epi8(dst0, zero);
    dst1 = _mm_unpacklo_epi8(dst1, zero);
    dst2 = _mm_unpacklo_epi8(dst2, zero);
    dst3 = _mm_unpacklo_epi8(dst3, zero);
    // Add the inverse transform(s).
    dst0 = _mm_add_epi16(dst0, T0);
    dst1 = _mm_add_epi16(dst1, T1);
    dst2 = _mm_add_epi16(dst2, T2);
    dst3 = _mm_add_epi16(dst3, T3);
    // Unsigned saturate to 8b.
    dst0 = _mm_packus_epi16(dst0, dst0);
    dst1 = _mm_packus_epi16(dst1, dst1);
    dst2 = _mm_packus_epi16(dst2, dst2);
    dst3 = _mm_packus_epi16(dst3, dst3);
    // Store the results.
    if (do_two) {
      // Store eight bytes/pixels per line.
      _mm_storel_epi64((__m128i*)(dst + 0 * BPS), dst0);
      _mm_storel_epi64((__m128i*)(dst + 1 * BPS), dst1);
      _mm_storel_epi64((__m128i*)(dst + 2 * BPS), dst2);
      _mm_storel_epi64((__m128i*)(dst + 3 * BPS), dst3);
    } else {
      // Store four bytes/pixels per line.
      WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0));
      WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1));
      WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
      WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
    }
  }
}

#if (USE_TRANSFORM_AC3 == 1)

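// Sparse inverse transform for the case where only the coefficients in[0]
// (DC), in[1] and in[4] are non-zero.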
static void TransformAC3_SSE2(const int16_t* WEBP_RESTRICT in,
                              uint8_t* WEBP_RESTRICT dst) {
  const __m128i A = _mm_set1_epi16(in[0] + 4);
  const __m128i c4 = _mm_set1_epi16(WEBP_TRANSFORM_AC3_MUL2(in[4]));
  const __m128i d4 = _mm_set1_epi16(WEBP_TRANSFORM_AC3_MUL1(in[4]));
  const int c1 = WEBP_TRANSFORM_AC3_MUL2(in[1]);
  const int d1 = WEBP_TRANSFORM_AC3_MUL1(in[1]);
  const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1);
  const __m128i B = _mm_adds_epi16(A, CD);
  const __m128i m0 = _mm_adds_epi16(B, d4);
  const __m128i m1 = _mm_adds_epi16(B, c4);
  const __m128i m2 = _mm_subs_epi16(B, c4);
  const __m128i m3 = _mm_subs_epi16(B, d4);
  const __m128i zero = _mm_setzero_si128();
  // Load the source pixels.
  __m128i dst0 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 0 * BPS));
  __m128i dst1 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 1 * BPS));
  __m128i dst2 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 2 * BPS));
  __m128i dst3 = _mm_cvtsi32_si128(WebPMemToInt32(dst + 3 * BPS));
  // Convert to 16b.
  dst0 = _mm_unpacklo_epi8(dst0, zero);
  dst1 = _mm_unpacklo_epi8(dst1, zero);
  dst2 = _mm_unpacklo_epi8(dst2, zero);
  dst3 = _mm_unpacklo_epi8(dst3, zero);
  // Add the inverse transform.
  dst0 = _mm_adds_epi16(dst0, _mm_srai_epi16(m0, 3));
  dst1 = _mm_adds_epi16(dst1, _mm_srai_epi16(m1, 3));
  dst2 = _mm_adds_epi16(dst2, _mm_srai_epi16(m2, 3));
  dst3 = _mm_adds_epi16(dst3, _mm_srai_epi16(m3, 3));
  // Unsigned saturate to 8b.
  dst0 = _mm_packus_epi16(dst0, dst0);
  dst1 = _mm_packus_epi16(dst1, dst1);
  dst2 = _mm_packus_epi16(dst2, dst2);
  dst3 = _mm_packus_epi16(dst3, dst3);
  // Store the results.
  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0));
  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1));
  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
}

#endif   // USE_TRANSFORM_AC3

//------------------------------------------------------------------------------
// Loop Filter (Paragraph 15)

// Compute abs(p - q) = subs(p - q) OR subs(q - p)
#define MM_ABS(p, q)  _mm_or_si128(                                            \
    _mm_subs_epu8((q), (p)),                                                   \
    _mm_subs_epu8((p), (q)))

// Shift each byte of "x" by 3 bits while preserving the sign bit.
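// Each byte is expanded into the high half of a 16-bit lane (i.e. multiplied
// by 256), shifted arithmetically right by 3 + 8 bits, then packed back to
// bytes with signed saturation, which amounts to a per-byte arithmetic >> 3.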
static WEBP_INLINE void SignedShift8b_SSE2(__m128i* const x) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i lo_0 = _mm_unpacklo_epi8(zero, *x);
  const __m128i hi_0 = _mm_unpackhi_epi8(zero, *x);
  const __m128i lo_1 = _mm_srai_epi16(lo_0, 3 + 8);
  const __m128i hi_1 = _mm_srai_epi16(hi_0, 3 + 8);
  *x = _mm_packs_epi16(lo_1, hi_1);
}

#define FLIP_SIGN_BIT2(a, b) do {                                              \
  (a) = _mm_xor_si128(a, sign_bit);                                            \
  (b) = _mm_xor_si128(b, sign_bit);                                            \
} while (0)

#define FLIP_SIGN_BIT4(a, b, c, d) do {                                        \
  FLIP_SIGN_BIT2(a, b);                                                        \
  FLIP_SIGN_BIT2(c, d);                                                        \
} while (0)

// input/output is uint8_t
static WEBP_INLINE void GetNotHEV_SSE2(const __m128i* const p1,
                                       const __m128i* const p0,
                                       const __m128i* const q0,
                                       const __m128i* const q1,
                                       int hev_thresh, __m128i* const not_hev) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i t_1 = MM_ABS(*p1, *p0);
  const __m128i t_2 = MM_ABS(*q1, *q0);

  const __m128i h = _mm_set1_epi8(hev_thresh);
  const __m128i t_max = _mm_max_epu8(t_1, t_2);

  const __m128i t_max_h = _mm_subs_epu8(t_max, h);
  *not_hev = _mm_cmpeq_epi8(t_max_h, zero);  // not_hev = (max(t_1, t_2) <= h)
}

// input pixels are int8_t
static WEBP_INLINE void GetBaseDelta_SSE2(const __m128i* const p1,
                                          const __m128i* const p0,
                                          const __m128i* const q0,
                                          const __m128i* const q1,
                                          __m128i* const delta) {
  // beware of addition order, for saturation!
  const __m128i p1_q1 = _mm_subs_epi8(*p1, *q1);   // p1 - q1
  const __m128i q0_p0 = _mm_subs_epi8(*q0, *p0);   // q0 - p0
  const __m128i s1 = _mm_adds_epi8(p1_q1, q0_p0);  // p1 - q1 + 1 * (q0 - p0)
  const __m128i s2 = _mm_adds_epi8(q0_p0, s1);     // p1 - q1 + 2 * (q0 - p0)
  const __m128i s3 = _mm_adds_epi8(q0_p0, s2);     // p1 - q1 + 3 * (q0 - p0)
  *delta = s3;
}

// input and output are int8_t
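// Applies the filter update with saturating 8-bit arithmetic:
// p0 += (fl + 3) >> 3 and q0 -= (fl + 4) >> 3, both shifts being the signed
// per-byte shift implemented by SignedShift8b_SSE2 above.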
static WEBP_INLINE void DoSimpleFilter_SSE2(__m128i* const p0,
                                            __m128i* const q0,
                                            const __m128i* const fl) {
  const __m128i k3 = _mm_set1_epi8(3);
  const __m128i k4 = _mm_set1_epi8(4);
  __m128i v3 = _mm_adds_epi8(*fl, k3);
  __m128i v4 = _mm_adds_epi8(*fl, k4);

  SignedShift8b_SSE2(&v4);             // v4 >> 3
  SignedShift8b_SSE2(&v3);             // v3 >> 3
  *q0 = _mm_subs_epi8(*q0, v4);        // q0 -= v4
  *p0 = _mm_adds_epi8(*p0, v3);        // p0 += v3
}

// Updates values of 2 pixels at MB edge during complex filtering.
// Update operations:
// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)]
// Pixels 'pi' and 'qi' are int8_t on input, uint8_t on output (sign flip).
static WEBP_INLINE void Update2Pixels_SSE2(__m128i* const pi, __m128i* const qi,
                                           const __m128i* const a0_lo,
                                           const __m128i* const a0_hi) {
  const __m128i a1_lo = _mm_srai_epi16(*a0_lo, 7);
  const __m128i a1_hi = _mm_srai_epi16(*a0_hi, 7);
  const __m128i delta = _mm_packs_epi16(a1_lo, a1_hi);
  const __m128i sign_bit = _mm_set1_epi8((char)0x80);
  *pi = _mm_adds_epi8(*pi, delta);
  *qi = _mm_subs_epi8(*qi, delta);
  FLIP_SIGN_BIT2(*pi, *qi);
}

// input pixels are uint8_t
static WEBP_INLINE void NeedsFilter_SSE2(const __m128i* const p1,
                                         const __m128i* const p0,
                                         const __m128i* const q0,
                                         const __m128i* const q1,
                                         int thresh, __m128i* const mask) {
  const __m128i m_thresh = _mm_set1_epi8((char)thresh);
  const __m128i t1 = MM_ABS(*p1, *q1);        // abs(p1 - q1)
  const __m128i kFE = _mm_set1_epi8((char)0xFE);
  const __m128i t2 = _mm_and_si128(t1, kFE);  // set lsb of each byte to zero
  const __m128i t3 = _mm_srli_epi16(t2, 1);   // abs(p1 - q1) / 2

  const __m128i t4 = MM_ABS(*p0, *q0);        // abs(p0 - q0)
  const __m128i t5 = _mm_adds_epu8(t4, t4);   // abs(p0 - q0) * 2
  const __m128i t6 = _mm_adds_epu8(t5, t3);   // abs(p0-q0)*2 + abs(p1-q1)/2

  const __m128i t7 = _mm_subs_epu8(t6, m_thresh);  // zero where t6 <= thresh
  *mask = _mm_cmpeq_epi8(t7, _mm_setzero_si128());
}

//------------------------------------------------------------------------------
// Edge filtering functions

// Applies filter on 2 pixels (p0 and q0)
static WEBP_INLINE void DoFilter2_SSE2(__m128i* const p1, __m128i* const p0,
                                       __m128i* const q0, __m128i* const q1,
                                       int thresh) {
  __m128i a, mask;
  const __m128i sign_bit = _mm_set1_epi8((char)0x80);
  // convert p1/q1 to int8_t (for GetBaseDelta_SSE2)
  const __m128i p1s = _mm_xor_si128(*p1, sign_bit);
  const __m128i q1s = _mm_xor_si128(*q1, sign_bit);

  NeedsFilter_SSE2(p1, p0, q0, q1, thresh, &mask);

  FLIP_SIGN_BIT2(*p0, *q0);
  GetBaseDelta_SSE2(&p1s, p0, q0, &q1s, &a);
  a = _mm_and_si128(a, mask);     // mask filter values we don't care about
  DoSimpleFilter_SSE2(p0, q0, &a);
  FLIP_SIGN_BIT2(*p0, *q0);
}

// Applies filter on 4 pixels (p1, p0, q0 and q1)
static WEBP_INLINE void DoFilter4_SSE2(__m128i* const p1, __m128i* const p0,
                                       __m128i* const q0, __m128i* const q1,
                                       const __m128i* const mask,
                                       int hev_thresh) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i sign_bit = _mm_set1_epi8((char)0x80);
  const __m128i k64 = _mm_set1_epi8(64);
  const __m128i k3 = _mm_set1_epi8(3);
  const __m128i k4 = _mm_set1_epi8(4);
  __m128i not_hev;
  __m128i t1, t2, t3;

  // compute hev mask
  GetNotHEV_SSE2(p1, p0, q0, q1, hev_thresh, &not_hev);

  // convert to signed values
  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);

  t1 = _mm_subs_epi8(*p1, *q1);        // p1 - q1
  t1 = _mm_andnot_si128(not_hev, t1);  // hev(p1 - q1)
  t2 = _mm_subs_epi8(*q0, *p0);        // q0 - p0
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 1 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 2 * (q0 - p0)
  t1 = _mm_adds_epi8(t1, t2);          // hev(p1 - q1) + 3 * (q0 - p0)
  t1 = _mm_and_si128(t1, *mask);       // mask filter values we don't care about

  t2 = _mm_adds_epi8(t1, k3);        // 3 * (q0 - p0) + hev(p1 - q1) + 3
  t3 = _mm_adds_epi8(t1, k4);        // 3 * (q0 - p0) + hev(p1 - q1) + 4
  SignedShift8b_SSE2(&t2);           // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3
  SignedShift8b_SSE2(&t3);           // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3
  *p0 = _mm_adds_epi8(*p0, t2);      // p0 += t2
  *q0 = _mm_subs_epi8(*q0, t3);      // q0 -= t3
  FLIP_SIGN_BIT2(*p0, *q0);

  // this is equivalent to the signed (t3 + 1) >> 1 calculation
  t2 = _mm_add_epi8(t3, sign_bit);
  t3 = _mm_avg_epu8(t2, zero);
  t3 = _mm_sub_epi8(t3, k64);

  t3 = _mm_and_si128(not_hev, t3);   // if !hev
  *q1 = _mm_subs_epi8(*q1, t3);      // q1 -= t3
  *p1 = _mm_adds_epi8(*p1, t3);      // p1 += t3
  FLIP_SIGN_BIT2(*p1, *q1);
}

// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2)
static WEBP_INLINE void DoFilter6_SSE2(__m128i* const p2, __m128i* const p1,
                                       __m128i* const p0, __m128i* const q0,
                                       __m128i* const q1, __m128i* const q2,
                                       const __m128i* const mask,
                                       int hev_thresh) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i sign_bit = _mm_set1_epi8((char)0x80);
  __m128i a, not_hev;

  // compute hev mask
  GetNotHEV_SSE2(p1, p0, q0, q1, hev_thresh, &not_hev);

  FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
  FLIP_SIGN_BIT2(*p2, *q2);
  GetBaseDelta_SSE2(p1, p0, q0, q1, &a);

  { // do simple filter on pixels with hev
    const __m128i m = _mm_andnot_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);
    DoSimpleFilter_SSE2(p0, q0, &f);
  }

  { // do strong filter on pixels with not hev
    const __m128i k9 = _mm_set1_epi16(0x0900);
    const __m128i k63 = _mm_set1_epi16(63);

    const __m128i m = _mm_and_si128(not_hev, *mask);
    const __m128i f = _mm_and_si128(a, m);

    const __m128i f_lo = _mm_unpacklo_epi8(zero, f);
    const __m128i f_hi = _mm_unpackhi_epi8(zero, f);

    const __m128i f9_lo = _mm_mulhi_epi16(f_lo, k9);    // Filter (lo) * 9
    const __m128i f9_hi = _mm_mulhi_epi16(f_hi, k9);    // Filter (hi) * 9

    const __m128i a2_lo = _mm_add_epi16(f9_lo, k63);    // Filter * 9 + 63
    const __m128i a2_hi = _mm_add_epi16(f9_hi, k63);    // Filter * 9 + 63

    const __m128i a1_lo = _mm_add_epi16(a2_lo, f9_lo);  // Filter * 18 + 63
    const __m128i a1_hi = _mm_add_epi16(a2_hi, f9_hi);  // Filter * 18 + 63

    const __m128i a0_lo = _mm_add_epi16(a1_lo, f9_lo);  // Filter * 27 + 63
    const __m128i a0_hi = _mm_add_epi16(a1_hi, f9_hi);  // Filter * 27 + 63

    Update2Pixels_SSE2(p2, q2, &a2_lo, &a2_hi);
    Update2Pixels_SSE2(p1, q1, &a1_lo, &a1_hi);
    Update2Pixels_SSE2(p0, q0, &a0_lo, &a0_hi);
  }
}

// reads 8 rows across a vertical edge.
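// Four bytes are gathered from each of 8 rows, then interleaved at byte,
// word and dword granularity; the net effect is a transpose, leaving columns
// 0 and 1 in *p and columns 2 and 3 in *q (8 pixels each).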
static WEBP_INLINE void Load8x4_SSE2(const uint8_t* const b, int stride,
                                     __m128i* const p, __m128i* const q) {
  // A0 = 63 62 61 60 23 22 21 20 43 42 41 40 03 02 01 00
  // A1 = 73 72 71 70 33 32 31 30 53 52 51 50 13 12 11 10
  const __m128i A0 = _mm_set_epi32(
      WebPMemToInt32(&b[6 * stride]), WebPMemToInt32(&b[2 * stride]),
      WebPMemToInt32(&b[4 * stride]), WebPMemToInt32(&b[0 * stride]));
  const __m128i A1 = _mm_set_epi32(
      WebPMemToInt32(&b[7 * stride]), WebPMemToInt32(&b[3 * stride]),
      WebPMemToInt32(&b[5 * stride]), WebPMemToInt32(&b[1 * stride]));

  // B0 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00
  // B1 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20
  const __m128i B0 = _mm_unpacklo_epi8(A0, A1);
  const __m128i B1 = _mm_unpackhi_epi8(A0, A1);

  // C0 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00
  // C1 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40
  const __m128i C0 = _mm_unpacklo_epi16(B0, B1);
  const __m128i C1 = _mm_unpackhi_epi16(B0, B1);

  // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  *p = _mm_unpacklo_epi32(C0, C1);
  *q = _mm_unpackhi_epi32(C0, C1);
}

static WEBP_INLINE void Load16x4_SSE2(const uint8_t* const r0,
                                      const uint8_t* const r8,
                                      int stride,
                                      __m128i* const p1, __m128i* const p0,
                                      __m128i* const q0, __m128i* const q1) {
  // Assume the pixels around the edge (|) are numbered as follows
  //                00 01 | 02 03
  //                10 11 | 12 13
  //                 ...  |  ...
  //                e0 e1 | e2 e3
  //                f0 f1 | f2 f3
  //
  // r0 is pointing to the 0th row (00)
  // r8 is pointing to the 8th row (80)

  // Load
  // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00
  // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02
  // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80
  // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82
  Load8x4_SSE2(r0, stride, p1, q0);
  Load8x4_SSE2(r8, stride, p0, q1);

  {
    // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00
    // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01
    // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02
    // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03
    const __m128i t1 = *p1;
    const __m128i t2 = *q0;
    *p1 = _mm_unpacklo_epi64(t1, *p0);
    *p0 = _mm_unpackhi_epi64(t1, *p0);
    *q0 = _mm_unpacklo_epi64(t2, *q1);
    *q1 = _mm_unpackhi_epi64(t2, *q1);
  }
}

static WEBP_INLINE void Store4x4_SSE2(__m128i* const x,
                                      uint8_t* dst, int stride) {
  int i;
  for (i = 0; i < 4; ++i, dst += stride) {
    WebPInt32ToMem(dst, _mm_cvtsi128_si32(*x));
    *x = _mm_srli_si128(*x, 4);
  }
}

// Transpose back and store
static WEBP_INLINE void Store16x4_SSE2(const __m128i* const p1,
                                       const __m128i* const p0,
                                       const __m128i* const q0,
                                       const __m128i* const q1,
                                       uint8_t* r0, uint8_t* r8,
                                       int stride) {
  __m128i t1, p1_s, p0_s, q0_s, q1_s;

  // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00
  // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80
  t1 = *p0;
  p0_s = _mm_unpacklo_epi8(*p1, t1);
  p1_s = _mm_unpackhi_epi8(*p1, t1);

  // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02
  // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82
  t1 = *q0;
  q0_s = _mm_unpacklo_epi8(t1, *q1);
  q1_s = _mm_unpackhi_epi8(t1, *q1);

  // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00
  // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40
  t1 = p0_s;
  p0_s = _mm_unpacklo_epi16(t1, q0_s);
  q0_s = _mm_unpackhi_epi16(t1, q0_s);

  // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80
  // q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0
  t1 = p1_s;
  p1_s = _mm_unpacklo_epi16(t1, q1_s);
  q1_s = _mm_unpackhi_epi16(t1, q1_s);

  Store4x4_SSE2(&p0_s, r0, stride);
  r0 += 4 * stride;
  Store4x4_SSE2(&q0_s, r0, stride);

  Store4x4_SSE2(&p1_s, r8, stride);
  r8 += 4 * stride;
  Store4x4_SSE2(&q1_s, r8, stride);
}

//------------------------------------------------------------------------------
// Simple In-loop filtering (Paragraph 15.2)

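// The simple filter examines only the two pixels on each side of the edge
// (p1 p0 | q0 q1) and modifies just p0 and q0.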
static void SimpleVFilter16_SSE2(uint8_t* p, int stride, int thresh) {
  // Load
  __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]);
  __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]);
  __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]);
  __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]);

  DoFilter2_SSE2(&p1, &p0, &q0, &q1, thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-stride], p0);
  _mm_storeu_si128((__m128i*)&p[0], q0);
}

static void SimpleHFilter16_SSE2(uint8_t* p, int stride, int thresh) {
  __m128i p1, p0, q0, q1;

  p -= 2;  // beginning of p1

  Load16x4_SSE2(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1);
  DoFilter2_SSE2(&p1, &p0, &q0, &q1, thresh);
  Store16x4_SSE2(&p1, &p0, &q0, &q1, p, p + 8 * stride, stride);
}

static void SimpleVFilter16i_SSE2(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4 * stride;
    SimpleVFilter16_SSE2(p, stride, thresh);
  }
}

static void SimpleHFilter16i_SSE2(uint8_t* p, int stride, int thresh) {
  int k;
  for (k = 3; k > 0; --k) {
    p += 4;
    SimpleHFilter16_SSE2(p, stride, thresh);
  }
}

//------------------------------------------------------------------------------
// Complex In-loop filtering (Paragraph 15.3)

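// The complex filter takes three thresholds: 'thresh' bounds the edge
// difference (as in the simple filter), 'ithresh' bounds the differences
// between interior pixels, and 'hev_thresh' detects high edge variance
// pixels, which receive the lighter 2-pixel update instead of the wider one.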
#define MAX_DIFF1(p3, p2, p1, p0, m) do {                                      \
  (m) = MM_ABS(p1, p0);                                                        \
  (m) = _mm_max_epu8(m, MM_ABS(p3, p2));                                       \
  (m) = _mm_max_epu8(m, MM_ABS(p2, p1));                                       \
} while (0)

#define MAX_DIFF2(p3, p2, p1, p0, m) do {                                      \
  (m) = _mm_max_epu8(m, MM_ABS(p1, p0));                                       \
  (m) = _mm_max_epu8(m, MM_ABS(p3, p2));                                       \
  (m) = _mm_max_epu8(m, MM_ABS(p2, p1));                                       \
} while (0)

#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) do {                          \
  (e1) = _mm_loadu_si128((__m128i*)&(p)[0 * (stride)]);                        \
  (e2) = _mm_loadu_si128((__m128i*)&(p)[1 * (stride)]);                        \
  (e3) = _mm_loadu_si128((__m128i*)&(p)[2 * (stride)]);                        \
  (e4) = _mm_loadu_si128((__m128i*)&(p)[3 * (stride)]);                        \
} while (0)

#define LOADUV_H_EDGE(p, u, v, stride) do {                                    \
  const __m128i U = _mm_loadl_epi64((__m128i*)&(u)[(stride)]);                 \
  const __m128i V = _mm_loadl_epi64((__m128i*)&(v)[(stride)]);                 \
  (p) = _mm_unpacklo_epi64(U, V);                                              \
} while (0)

#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) do {                     \
  LOADUV_H_EDGE(e1, u, v, 0 * (stride));                                       \
  LOADUV_H_EDGE(e2, u, v, 1 * (stride));                                       \
  LOADUV_H_EDGE(e3, u, v, 2 * (stride));                                       \
  LOADUV_H_EDGE(e4, u, v, 3 * (stride));                                       \
} while (0)

#define STOREUV(p, u, v, stride) do {                                          \
  _mm_storel_epi64((__m128i*)&(u)[(stride)], p);                               \
  (p) = _mm_srli_si128(p, 8);                                                  \
  _mm_storel_epi64((__m128i*)&(v)[(stride)], p);                               \
} while (0)

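// On input, *mask holds the largest neighboring-pixel difference computed by
// MAX_DIFF1/MAX_DIFF2; on output it is the final filter mask, combining the
// interior threshold test with NeedsFilter_SSE2.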
static WEBP_INLINE void ComplexMask_SSE2(const __m128i* const p1,
                                         const __m128i* const p0,
                                         const __m128i* const q0,
                                         const __m128i* const q1,
                                         int thresh, int ithresh,
                                         __m128i* const mask) {
  const __m128i it = _mm_set1_epi8(ithresh);
  const __m128i diff = _mm_subs_epu8(*mask, it);
  const __m128i thresh_mask = _mm_cmpeq_epi8(diff, _mm_setzero_si128());
  __m128i filter_mask;
  NeedsFilter_SSE2(p1, p0, q0, q1, thresh, &filter_mask);
  *mask = _mm_and_si128(thresh_mask, filter_mask);
}

// on macroblock edges
static void VFilter16_SSE2(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  __m128i t1;
  __m128i mask;
  __m128i p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOAD_H_EDGES4(p, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  _mm_storeu_si128((__m128i*)&p[-3 * stride], p2);
  _mm_storeu_si128((__m128i*)&p[-2 * stride], p1);
  _mm_storeu_si128((__m128i*)&p[-1 * stride], p0);
  _mm_storeu_si128((__m128i*)&p[+0 * stride], q0);
  _mm_storeu_si128((__m128i*)&p[+1 * stride], q1);
  _mm_storeu_si128((__m128i*)&p[+2 * stride], q2);
}

static void HFilter16_SSE2(uint8_t* p, int stride,
                           int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const b = p - 4;
  Load16x4_SSE2(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0);
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4_SSE2(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3);
  MAX_DIFF2(q3, q2, q1, q0, mask);

  ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4_SSE2(&p3, &p2, &p1, &p0, b, b + 8 * stride, stride);
  Store16x4_SSE2(&q0, &q1, &q2, &q3, p, p + 8 * stride, stride);
}

// on three inner edges
static void VFilter16i_SSE2(uint8_t* p, int stride,
                            int thresh, int ithresh, int hev_thresh) {
  int k;
  __m128i p3, p2, p1, p0;   // loop invariants

  LOAD_H_EDGES4(p, stride, p3, p2, p1, p0);  // prologue

  for (k = 3; k > 0; --k) {
    __m128i mask, tmp1, tmp2;
    uint8_t* const b = p + 2 * stride;   // beginning of p1
    p += 4 * stride;

    MAX_DIFF1(p3, p2, p1, p0, mask);   // compute partial mask
    LOAD_H_EDGES4(p, stride, p3, p2, tmp1, tmp2);
    MAX_DIFF2(p3, p2, tmp1, tmp2, mask);

    // p3 and p2 are not just temporary variables here: they will be
    // re-used for next span. And q2/q3 will become p1/p0 accordingly.
    ComplexMask_SSE2(&p1, &p0, &p3, &p2, thresh, ithresh, &mask);
    DoFilter4_SSE2(&p1, &p0, &p3, &p2, &mask, hev_thresh);

    // Store
    _mm_storeu_si128((__m128i*)&b[0 * stride], p1);
    _mm_storeu_si128((__m128i*)&b[1 * stride], p0);
    _mm_storeu_si128((__m128i*)&b[2 * stride], p3);
    _mm_storeu_si128((__m128i*)&b[3 * stride], p2);

    // rotate samples
    p1 = tmp1;
    p0 = tmp2;
  }
}

static void HFilter16i_SSE2(uint8_t* p, int stride,
                            int thresh, int ithresh, int hev_thresh) {
  int k;
  __m128i p3, p2, p1, p0;   // loop invariants

  Load16x4_SSE2(p, p + 8 * stride, stride, &p3, &p2, &p1, &p0);  // prologue

  for (k = 3; k > 0; --k) {
    __m128i mask, tmp1, tmp2;
    uint8_t* const b = p + 2;   // beginning of p1

    p += 4;  // beginning of q0 (and next span)

    MAX_DIFF1(p3, p2, p1, p0, mask);   // compute partial mask
    Load16x4_SSE2(p, p + 8 * stride, stride, &p3, &p2, &tmp1, &tmp2);
    MAX_DIFF2(p3, p2, tmp1, tmp2, mask);

    ComplexMask_SSE2(&p1, &p0, &p3, &p2, thresh, ithresh, &mask);
    DoFilter4_SSE2(&p1, &p0, &p3, &p2, &mask, hev_thresh);

    Store16x4_SSE2(&p1, &p0, &p3, &p2, b, b + 8 * stride, stride);

    // rotate samples
    p1 = tmp1;
    p0 = tmp2;
  }
}

// 8-pixels wide variant, for chroma filtering
static void VFilter8_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
                          int stride, int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, p2, p1, p0, q0, q1, q2;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0);
  MAX_DIFF1(t1, p2, p1, p0, mask);

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1);
  MAX_DIFF2(t1, q2, q1, q0, mask);

  ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  // Store
  STOREUV(p2, u, v, -3 * stride);
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
  STOREUV(q2, u, v, 2 * stride);
}

static void HFilter8_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
                          int stride, int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i p3, p2, p1, p0, q0, q1, q2, q3;

  uint8_t* const tu = u - 4;
  uint8_t* const tv = v - 4;
  Load16x4_SSE2(tu, tv, stride, &p3, &p2, &p1, &p0);
  MAX_DIFF1(p3, p2, p1, p0, mask);

  Load16x4_SSE2(u, v, stride, &q0, &q1, &q2, &q3);
  MAX_DIFF2(q3, q2, q1, q0, mask);

  ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh);

  Store16x4_SSE2(&p3, &p2, &p1, &p0, tu, tv, stride);
  Store16x4_SSE2(&q0, &q1, &q2, &q3, u, v, stride);
}

static void VFilter8i_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
                           int stride,
                           int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;

  // Load p3, p2, p1, p0
  LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0);
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4 * stride;
  v += 4 * stride;

  // Load q0, q1, q2, q3
  LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2);
  MAX_DIFF2(t2, t1, q1, q0, mask);

  ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter4_SSE2(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  // Store
  STOREUV(p1, u, v, -2 * stride);
  STOREUV(p0, u, v, -1 * stride);
  STOREUV(q0, u, v, 0 * stride);
  STOREUV(q1, u, v, 1 * stride);
}

static void HFilter8i_SSE2(uint8_t* WEBP_RESTRICT u, uint8_t* WEBP_RESTRICT v,
                           int stride,
                           int thresh, int ithresh, int hev_thresh) {
  __m128i mask;
  __m128i t1, t2, p1, p0, q0, q1;
  Load16x4_SSE2(u, v, stride, &t2, &t1, &p1, &p0);   // p3, p2, p1, p0
  MAX_DIFF1(t2, t1, p1, p0, mask);

  u += 4;  // beginning of q0
  v += 4;
  Load16x4_SSE2(u, v, stride, &q0, &q1, &t1, &t2);  // q0, q1, q2, q3
  MAX_DIFF2(t2, t1, q1, q0, mask);

  ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
  DoFilter4_SSE2(&p1, &p0, &q0, &q1, &mask, hev_thresh);

  u -= 2;  // beginning of p1
  v -= 2;
  Store16x4_SSE2(&p1, &p0, &q0, &q1, u, v, stride);
}

//------------------------------------------------------------------------------
// 4x4 predictions

#define DST(x, y) dst[(x) + (y) * BPS]
#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)

// We use the following 8b-arithmetic tricks:
//     (a + 2 * b + c + 2) >> 2 = (AC + b + 1) >> 1
//   where: AC = (a + c) >> 1 = [(a + c + 1) >> 1] - [(a^c) & 1]
// and:
//     (a + 2 * b + c + 2) >> 2 = (AB + BC + 1) >> 1 - (ab|bc)&lsb
//   where: AB = (a + b + 1) >> 1,   BC = (b + c + 1) >> 1
//   and ab = a ^ b, bc = b ^ c, lsb = (AB^BC)&1
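// Quick numeric check with (a, b, c) = (1, 2, 3): the exact value is
// (1 + 4 + 3 + 2) >> 2 = 2. First trick: AC = (1 + 3) >> 1 = 2, and
// (AC + b + 1) >> 1 = (2 + 2 + 1) >> 1 = 2. Second trick: AB = 2, BC = 3,
// lsb = (2^3) & 1 = 1, (ab|bc) & lsb = ((1^2)|(2^3)) & 1 = 1, so
// ((2 + 3 + 1) >> 1) - 1 = 2 as well.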

static void VE4_SSE2(uint8_t* dst) {    // vertical
  const __m128i one = _mm_set1_epi8(1);
  const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS - 1));
  const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1);
  const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i a = _mm_avg_epu8(ABCDEFGH, CDEFGH00);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGH00), one);
  const __m128i b = _mm_subs_epu8(a, lsb);
  const __m128i avg = _mm_avg_epu8(b, BCDEFGH0);
  const int vals = _mm_cvtsi128_si32(avg);
  int i;
  for (i = 0; i < 4; ++i) {
    WebPInt32ToMem(dst + i * BPS, vals);
  }
}

static void LD4_SSE2(uint8_t* dst) {   // Down-Left
  const __m128i one = _mm_set1_epi8(1);
  const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS));
  const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1);
  const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i CDEFGHH0 = _mm_insert_epi16(CDEFGH00, dst[-BPS + 7], 3);
  const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, CDEFGHH0);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGHH0), one);
  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
  const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0);
  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(               abcdefg    ));
  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
}

static void VR4_SSE2(uint8_t* dst) {   // Vertical-Right
  const __m128i one = _mm_set1_epi8(1);
  const int I = dst[-1 + 0 * BPS];
  const int J = dst[-1 + 1 * BPS];
  const int K = dst[-1 + 2 * BPS];
  const int X = dst[-1 - BPS];
  const __m128i XABCD = _mm_loadl_epi64((__m128i*)(dst - BPS - 1));
  const __m128i ABCD0 = _mm_srli_si128(XABCD, 1);
  const __m128i abcd = _mm_avg_epu8(XABCD, ABCD0);
  const __m128i _XABCD = _mm_slli_si128(XABCD, 1);
  const __m128i IXABCD = _mm_insert_epi16(_XABCD, (short)(I | (X << 8)), 0);
  const __m128i avg1 = _mm_avg_epu8(IXABCD, ABCD0);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(IXABCD, ABCD0), one);
  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
  const __m128i efgh = _mm_avg_epu8(avg2, XABCD);
  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(               abcd    ));
  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(               efgh    ));
  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(abcd, 1)));
  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(efgh, 1)));

  // these two are hard to implement in SSE2, so we keep the C-version:
  DST(0, 2) = AVG3(J, I, X);
  DST(0, 3) = AVG3(K, J, I);
}

static void VL4_SSE2(uint8_t* dst) {   // Vertical-Left
  const __m128i one = _mm_set1_epi8(1);
  const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS));
  const __m128i BCDEFGH_ = _mm_srli_si128(ABCDEFGH, 1);
  const __m128i CDEFGH__ = _mm_srli_si128(ABCDEFGH, 2);
  const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, BCDEFGH_);
  const __m128i avg2 = _mm_avg_epu8(CDEFGH__, BCDEFGH_);
  const __m128i avg3 = _mm_avg_epu8(avg1, avg2);
  const __m128i lsb1 = _mm_and_si128(_mm_xor_si128(avg1, avg2), one);
  const __m128i ab = _mm_xor_si128(ABCDEFGH, BCDEFGH_);
  const __m128i bc = _mm_xor_si128(CDEFGH__, BCDEFGH_);
  const __m128i abbc = _mm_or_si128(ab, bc);
  const __m128i lsb2 = _mm_and_si128(abbc, lsb1);
  const __m128i avg4 = _mm_subs_epu8(avg3, lsb2);
  const uint32_t extra_out =
      (uint32_t)_mm_cvtsi128_si32(_mm_srli_si128(avg4, 4));
  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(               avg1    ));
  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(               avg4    ));
  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg1, 1)));
  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg4, 1)));

  // these two are hard to get and irregular
  DST(3, 2) = (extra_out >> 0) & 0xff;
  DST(3, 3) = (extra_out >> 8) & 0xff;
}

static void RD4_SSE2(uint8_t* dst) {   // Down-right
  const __m128i one = _mm_set1_epi8(1);
  const __m128i XABCD = _mm_loadl_epi64((__m128i*)(dst - BPS - 1));
  const __m128i ____XABCD = _mm_slli_si128(XABCD, 4);
  const uint32_t I = dst[-1 + 0 * BPS];
  const uint32_t J = dst[-1 + 1 * BPS];
  const uint32_t K = dst[-1 + 2 * BPS];
  const uint32_t L = dst[-1 + 3 * BPS];
  const __m128i LKJI_____ =
      _mm_cvtsi32_si128((int)(L | (K << 8) | (J << 16) | (I << 24)));
  const __m128i LKJIXABCD = _mm_or_si128(LKJI_____, ____XABCD);
  const __m128i KJIXABCD_ = _mm_srli_si128(LKJIXABCD, 1);
  const __m128i JIXABCD__ = _mm_srli_si128(LKJIXABCD, 2);
  const __m128i avg1 = _mm_avg_epu8(JIXABCD__, LKJIXABCD);
  const __m128i lsb = _mm_and_si128(_mm_xor_si128(JIXABCD__, LKJIXABCD), one);
  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
  const __m128i abcdefg = _mm_avg_epu8(avg2, KJIXABCD_);
  WebPInt32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(               abcdefg    ));
  WebPInt32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
  WebPInt32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
  WebPInt32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
}

#undef DST
#undef AVG3

//------------------------------------------------------------------------------
// Luma 16x16

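// TrueMotion prediction: pred(x, y) = clip(top[x] + left[y] - top_left).
// The code broadcasts (left[y] - top_left) and adds it to the zero-extended
// top row; the final pack with unsigned saturation performs the clipping.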
static WEBP_INLINE void TrueMotion_SSE2(uint8_t* dst, int size) {
  const uint8_t* top = dst - BPS;
  const __m128i zero = _mm_setzero_si128();
  int y;
  if (size == 4) {
    const __m128i top_values = _mm_cvtsi32_si128(WebPMemToInt32(top));
    const __m128i top_base = _mm_unpacklo_epi8(top_values, zero);
    for (y = 0; y < 4; ++y, dst += BPS) {
      const int val = dst[-1] - top[-1];
      const __m128i base = _mm_set1_epi16(val);
      const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero);
      WebPInt32ToMem(dst, _mm_cvtsi128_si32(out));
    }
  } else if (size == 8) {
    const __m128i top_values = _mm_loadl_epi64((const __m128i*)top);
    const __m128i top_base = _mm_unpacklo_epi8(top_values, zero);
    for (y = 0; y < 8; ++y, dst += BPS) {
      const int val = dst[-1] - top[-1];
      const __m128i base = _mm_set1_epi16(val);
      const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero);
      _mm_storel_epi64((__m128i*)dst, out);
    }
  } else {
    const __m128i top_values = _mm_loadu_si128((const __m128i*)top);
    const __m128i top_base_0 = _mm_unpacklo_epi8(top_values, zero);
    const __m128i top_base_1 = _mm_unpackhi_epi8(top_values, zero);
    for (y = 0; y < 16; ++y, dst += BPS) {
      const int val = dst[-1] - top[-1];
      const __m128i base = _mm_set1_epi16(val);
      const __m128i out_0 = _mm_add_epi16(base, top_base_0);
      const __m128i out_1 = _mm_add_epi16(base, top_base_1);
      const __m128i out = _mm_packus_epi16(out_0, out_1);
      _mm_storeu_si128((__m128i*)dst, out);
    }
  }
}

static void TM4_SSE2(uint8_t* dst)   { TrueMotion_SSE2(dst, 4); }
static void TM8uv_SSE2(uint8_t* dst) { TrueMotion_SSE2(dst, 8); }
static void TM16_SSE2(uint8_t* dst)  { TrueMotion_SSE2(dst, 16); }

static void VE16_SSE2(uint8_t* dst) {
  const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS));
  int j;
  for (j = 0; j < 16; ++j) {
    _mm_storeu_si128((__m128i*)(dst + j * BPS), top);
  }
}

static void HE16_SSE2(uint8_t* dst) {     // horizontal
  int j;
  for (j = 16; j > 0; --j) {
    const __m128i values = _mm_set1_epi8((char)dst[-1]);
    _mm_storeu_si128((__m128i*)dst, values);
    dst += BPS;
  }
}

static WEBP_INLINE void Put16_SSE2(uint8_t v, uint8_t* dst) {
  int j;
  const __m128i values = _mm_set1_epi8((char)v);
  for (j = 0; j < 16; ++j) {
    _mm_storeu_si128((__m128i*)(dst + j * BPS), values);
  }
}

static void DC16_SSE2(uint8_t* dst) {  // DC
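  // _mm_sad_epu8 against zero sums the bytes of each 8-byte half into the
  // low 16 bits of the corresponding 64-bit lane.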
  const __m128i zero = _mm_setzero_si128();
  const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS));
  const __m128i sad8x2 = _mm_sad_epu8(top, zero);
  // sum the two sads: sad8x2[0:1] + sad8x2[8:9]
  const __m128i sum = _mm_add_epi16(sad8x2, _mm_shuffle_epi32(sad8x2, 2));
  int left = 0;
  int j;
  for (j = 0; j < 16; ++j) {
    left += dst[-1 + j * BPS];
  }
  {
    const int DC = _mm_cvtsi128_si32(sum) + left + 16;
    Put16_SSE2(DC >> 5, dst);
  }
}

static void DC16NoTop_SSE2(uint8_t* dst) {  // DC with top samples unavailable
  int DC = 8;
  int j;
  for (j = 0; j < 16; ++j) {
    DC += dst[-1 + j * BPS];
  }
  Put16_SSE2(DC >> 4, dst);
}

static void DC16NoLeft_SSE2(uint8_t* dst) {  // DC with left samples unavailable
  const __m128i zero = _mm_setzero_si128();
  const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS));
  const __m128i sad8x2 = _mm_sad_epu8(top, zero);
  // sum the two sads: sad8x2[0:1] + sad8x2[8:9]
  const __m128i sum = _mm_add_epi16(sad8x2, _mm_shuffle_epi32(sad8x2, 2));
  const int DC = _mm_cvtsi128_si32(sum) + 8;
  Put16_SSE2(DC >> 4, dst);
}

static void DC16NoTopLeft_SSE2(uint8_t* dst) {  // DC with no top & left samples
  Put16_SSE2(0x80, dst);
}

//------------------------------------------------------------------------------
// Chroma

static void VE8uv_SSE2(uint8_t* dst) {    // vertical
  int j;
  const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS));
  for (j = 0; j < 8; ++j) {
    _mm_storel_epi64((__m128i*)(dst + j * BPS), top);
  }
}

// helper for chroma-DC predictions
static WEBP_INLINE void Put8x8uv_SSE2(uint8_t v, uint8_t* dst) {
  int j;
  const __m128i values = _mm_set1_epi8((char)v);
  for (j = 0; j < 8; ++j) {
    _mm_storel_epi64((__m128i*)(dst + j * BPS), values);
  }
}

static void DC8uv_SSE2(uint8_t* dst) {     // DC
  const __m128i zero = _mm_setzero_si128();
  const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS));
  const __m128i sum = _mm_sad_epu8(top, zero);
  int left = 0;
  int j;
  for (j = 0; j < 8; ++j) {
    left += dst[-1 + j * BPS];
  }
  {
    const int DC = _mm_cvtsi128_si32(sum) + left + 8;
    Put8x8uv_SSE2(DC >> 4, dst);
  }
}

static void DC8uvNoLeft_SSE2(uint8_t* dst) {   // DC with no left samples
  const __m128i zero = _mm_setzero_si128();
  const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS));
  const __m128i sum = _mm_sad_epu8(top, zero);
  const int DC = _mm_cvtsi128_si32(sum) + 4;
  Put8x8uv_SSE2(DC >> 3, dst);
}

static void DC8uvNoTop_SSE2(uint8_t* dst) {  // DC with no top samples
  int dc0 = 4;
  int i;
  for (i = 0; i < 8; ++i) {
    dc0 += dst[-1 + i * BPS];
  }
  Put8x8uv_SSE2(dc0 >> 3, dst);
}

static void DC8uvNoTopLeft_SSE2(uint8_t* dst) {    // DC with nothing
  Put8x8uv_SSE2(0x80, dst);
}

//------------------------------------------------------------------------------
// Entry point

extern void VP8DspInitSSE2(void);

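// Note: this initializer is called from VP8DspInit() once SSE2 support has
// been detected at runtime (via VP8GetCPUInfo).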
WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitSSE2(void) {
  VP8Transform = Transform_SSE2;
#if (USE_TRANSFORM_AC3 == 1)
  VP8TransformAC3 = TransformAC3_SSE2;
#endif

  VP8VFilter16 = VFilter16_SSE2;
  VP8HFilter16 = HFilter16_SSE2;
  VP8VFilter8 = VFilter8_SSE2;
  VP8HFilter8 = HFilter8_SSE2;
  VP8VFilter16i = VFilter16i_SSE2;
  VP8HFilter16i = HFilter16i_SSE2;
  VP8VFilter8i = VFilter8i_SSE2;
  VP8HFilter8i = HFilter8i_SSE2;

  VP8SimpleVFilter16 = SimpleVFilter16_SSE2;
  VP8SimpleHFilter16 = SimpleHFilter16_SSE2;
  VP8SimpleVFilter16i = SimpleVFilter16i_SSE2;
  VP8SimpleHFilter16i = SimpleHFilter16i_SSE2;

  VP8PredLuma4[1] = TM4_SSE2;
  VP8PredLuma4[2] = VE4_SSE2;
  VP8PredLuma4[4] = RD4_SSE2;
  VP8PredLuma4[5] = VR4_SSE2;
  VP8PredLuma4[6] = LD4_SSE2;
  VP8PredLuma4[7] = VL4_SSE2;
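  // Note: DC4, HE4, HD4 and HU4 (indices 0, 3, 8 and 9) keep their plain-C
  // implementations.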

  VP8PredLuma16[0] = DC16_SSE2;
  VP8PredLuma16[1] = TM16_SSE2;
  VP8PredLuma16[2] = VE16_SSE2;
  VP8PredLuma16[3] = HE16_SSE2;
  VP8PredLuma16[4] = DC16NoTop_SSE2;
  VP8PredLuma16[5] = DC16NoLeft_SSE2;
  VP8PredLuma16[6] = DC16NoTopLeft_SSE2;

  VP8PredChroma8[0] = DC8uv_SSE2;
  VP8PredChroma8[1] = TM8uv_SSE2;
  VP8PredChroma8[2] = VE8uv_SSE2;
  VP8PredChroma8[4] = DC8uvNoTop_SSE2;
  VP8PredChroma8[5] = DC8uvNoLeft_SSE2;
  VP8PredChroma8[6] = DC8uvNoTopLeft_SSE2;
}

#else  // !WEBP_USE_SSE2

WEBP_DSP_INIT_STUB(VP8DspInitSSE2)

#endif  // WEBP_USE_SSE2