tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

lossless_enc_avx2.c (32487B)


// Copyright 2025 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// AVX2 variant of methods for lossless encoder
//
// Author: Vincent Rabaud (vrabaud@google.com)

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_AVX2)
#include <emmintrin.h>
#include <immintrin.h>

#include <assert.h>
#include <stddef.h>

#include "src/dsp/cpu.h"
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"
#include "src/utils/utils.h"
#include "src/webp/format_constants.h"
#include "src/webp/types.h"

//------------------------------------------------------------------------------
// Subtract-Green Transform

static void SubtractGreenFromBlueAndRed_AVX2(uint32_t* argb_data,
                                             int num_pixels) {
  int i;
  const __m256i kCstShuffle = _mm256_set_epi8(
      -1, 29, -1, 29, -1, 25, -1, 25, -1, 21, -1, 21, -1, 17, -1, 17, -1, 13,
      -1, 13, -1, 9, -1, 9, -1, 5, -1, 5, -1, 1, -1, 1);
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i in = _mm256_loadu_si256((__m256i*)&argb_data[i]);  // argb
    const __m256i in_0g0g = _mm256_shuffle_epi8(in, kCstShuffle);
    const __m256i out = _mm256_sub_epi8(in, in_0g0g);
    _mm256_storeu_si256((__m256i*)&argb_data[i], out);
  }
  // fallthrough and finish off with plain-SSE
  if (i != num_pixels) {
    VP8LSubtractGreenFromBlueAndRed_SSE(argb_data + i, num_pixels - i);
  }
}
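
// For reference (not part of the dispatch tables): a scalar sketch of the
// same transform, assuming the 0xAARRGGBB pixel layout used throughout:
//   const uint32_t argb = argb_data[i];
//   const uint32_t green = (argb >> 8) & 0xff;
//   const uint32_t new_r = ((argb >> 16) - green) & 0xff;  // (r - g) & 0xff
//   const uint32_t new_b = (argb - green) & 0xff;          // (b - g) & 0xff
//   argb_data[i] = (argb & 0xff00ff00u) | (new_r << 16) | new_b;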

//------------------------------------------------------------------------------
// Color Transform

// For sign-extended multiplying constants, pre-shifted by 5:
#define CST_5b(X) (((int16_t)((uint16_t)(X) << 8)) >> 5)

#define MK_CST_16(HI, LO) \
  _mm256_set1_epi32((int)(((uint32_t)(HI) << 16) | ((LO) & 0xffff)))
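
// How these constants interact with _mm256_mulhi_epi16 (illustrative note):
// with green g and multiplier m both taken as signed 8-bit values, the code
// below builds 16-bit lanes holding g << 8, and
//   mulhi(g << 8, CST_5b(m)) == (g * m * (1 << 11)) >> 16 == (g * m) >> 5,
// i.e. the VP8L color-transform delta. E.g. m = 64, g = 100:
// CST_5b(64) = 512 and (25600 * 512) >> 16 == 200 == (100 * 64) >> 5.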

static void TransformColor_AVX2(const VP8LMultipliers* WEBP_RESTRICT const m,
                                uint32_t* WEBP_RESTRICT argb_data,
                                int num_pixels) {
  const __m256i mults_rb =
      MK_CST_16(CST_5b(m->green_to_red), CST_5b(m->green_to_blue));
  const __m256i mults_b2 = MK_CST_16(CST_5b(m->red_to_blue), 0);
  const __m256i mask_rb = _mm256_set1_epi32(0x00ff00ff);  // red-blue masks
  const __m256i kCstShuffle = _mm256_set_epi8(
      29, -1, 29, -1, 25, -1, 25, -1, 21, -1, 21, -1, 17, -1, 17, -1, 13, -1,
      13, -1, 9, -1, 9, -1, 5, -1, 5, -1, 1, -1, 1, -1);
  int i;
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i in = _mm256_loadu_si256((__m256i*)&argb_data[i]);  // argb
    const __m256i A = _mm256_shuffle_epi8(in, kCstShuffle);          // g0g0
    const __m256i B = _mm256_mulhi_epi16(A, mults_rb);  // x dr  x db1
    const __m256i C = _mm256_slli_epi16(in, 8);         // r 0   b   0
    const __m256i D = _mm256_mulhi_epi16(C, mults_b2);  // x db2 0   0
    const __m256i E = _mm256_srli_epi32(D, 16);         // 0 0   x db2
    const __m256i F = _mm256_add_epi8(E, B);            // x dr  x  db
    const __m256i G = _mm256_and_si256(F, mask_rb);     // 0 dr  0  db
    const __m256i out = _mm256_sub_epi8(in, G);
    _mm256_storeu_si256((__m256i*)&argb_data[i], out);
  }
  // fallthrough and finish off with plain-SSE
  if (i != num_pixels) {
    VP8LTransformColor_SSE(m, argb_data + i, num_pixels - i);
  }
}
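
// Scalar sketch of the forward transform above, with
// delta(t, c) = ((int8_t)t * (int8_t)c) >> 5 and all results kept modulo 256;
// alpha and green pass through unchanged:
//   new_red  = red  - delta(m->green_to_red, green);
//   new_blue = blue - delta(m->green_to_blue, green)
//                   - delta(m->red_to_blue, red);  // uses the original red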

//------------------------------------------------------------------------------
#define SPAN 16
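// The two CollectColor*Transforms functions below apply a candidate color
// transform to a tile and histogram the resulting residual channel; the
// encoder picks the multipliers whose histogram has the lowest entropy. The
// inner loops are software-pipelined: each iteration stores and histograms
// the 8 residuals of the previous vector while computing the next one.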
static void CollectColorBlueTransforms_AVX2(const uint32_t* WEBP_RESTRICT argb,
                                            int stride, int tile_width,
                                            int tile_height, int green_to_blue,
                                            int red_to_blue, uint32_t histo[]) {
  const __m256i mult =
      MK_CST_16(CST_5b(red_to_blue) + 256, CST_5b(green_to_blue));
  const __m256i perm = _mm256_setr_epi8(
      -1, 1, -1, 2, -1, 5, -1, 6, -1, 9, -1, 10, -1, 13, -1, 14, -1, 17, -1, 18,
      -1, 21, -1, 22, -1, 25, -1, 26, -1, 29, -1, 30);
  if (tile_width >= 8) {
    int y, i;
    for (y = 0; y < tile_height; ++y) {
      uint8_t values[32];
      const uint32_t* const src = argb + y * stride;
      const __m256i A1 = _mm256_loadu_si256((const __m256i*)src);
      const __m256i B1 = _mm256_shuffle_epi8(A1, perm);
      const __m256i C1 = _mm256_mulhi_epi16(B1, mult);
      const __m256i D1 = _mm256_sub_epi16(A1, C1);
      __m256i E = _mm256_add_epi16(_mm256_srli_epi32(D1, 16), D1);
      int x;
      for (x = 8; x + 8 <= tile_width; x += 8) {
        const __m256i A2 = _mm256_loadu_si256((const __m256i*)(src + x));
        __m256i B2, C2, D2;
        _mm256_storeu_si256((__m256i*)values, E);
        for (i = 0; i < 32; i += 4) ++histo[values[i]];
        B2 = _mm256_shuffle_epi8(A2, perm);
        C2 = _mm256_mulhi_epi16(B2, mult);
        D2 = _mm256_sub_epi16(A2, C2);
        E = _mm256_add_epi16(_mm256_srli_epi32(D2, 16), D2);
      }
      _mm256_storeu_si256((__m256i*)values, E);
      for (i = 0; i < 32; i += 4) ++histo[values[i]];
    }
  }
  {
    const int left_over = tile_width & 7;
    if (left_over > 0) {
      VP8LCollectColorBlueTransforms_SSE(argb + tile_width - left_over, stride,
                                         left_over, tile_height, green_to_blue,
                                         red_to_blue, histo);
    }
  }
}

static void CollectColorRedTransforms_AVX2(const uint32_t* WEBP_RESTRICT argb,
                                           int stride, int tile_width,
                                           int tile_height, int green_to_red,
                                           uint32_t histo[]) {
  const __m256i mult = MK_CST_16(0, CST_5b(green_to_red));
  const __m256i mask_g = _mm256_set1_epi32(0x0000ff00);
  if (tile_width >= 8) {
    int y, i;
    for (y = 0; y < tile_height; ++y) {
      uint8_t values[32];
      const uint32_t* const src = argb + y * stride;
      const __m256i A1 = _mm256_loadu_si256((const __m256i*)src);
      const __m256i B1 = _mm256_and_si256(A1, mask_g);
      const __m256i C1 = _mm256_madd_epi16(B1, mult);
      __m256i D = _mm256_sub_epi16(A1, C1);
      int x;
      for (x = 8; x + 8 <= tile_width; x += 8) {
        const __m256i A2 = _mm256_loadu_si256((const __m256i*)(src + x));
        __m256i B2, C2;
        _mm256_storeu_si256((__m256i*)values, D);
        for (i = 2; i < 32; i += 4) ++histo[values[i]];
        B2 = _mm256_and_si256(A2, mask_g);
        C2 = _mm256_madd_epi16(B2, mult);
        D = _mm256_sub_epi16(A2, C2);
      }
      _mm256_storeu_si256((__m256i*)values, D);
      for (i = 2; i < 32; i += 4) ++histo[values[i]];
    }
  }
  {
    const int left_over = tile_width & 7;
    if (left_over > 0) {
      VP8LCollectColorRedTransforms_SSE(argb + tile_width - left_over, stride,
                                        left_over, tile_height, green_to_red,
                                        histo);
    }
  }
}
#undef SPAN
#undef MK_CST_16

//------------------------------------------------------------------------------

// Note we are adding uint32_t's as *signed* int32's (using _mm256_add_epi32).
// But that's ok since the histogram values are less than 1<<28 (max picture
// size).
static void AddVector_AVX2(const uint32_t* WEBP_RESTRICT a,
                           const uint32_t* WEBP_RESTRICT b,
                           uint32_t* WEBP_RESTRICT out, int size) {
  int i = 0;
  int aligned_size = size & ~31;
  // Size is, at minimum, NUM_DISTANCE_CODES (40) and may be as large as
  // NUM_LITERAL_CODES (256) + NUM_LENGTH_CODES (24) + (0 or a non-zero power of
  // 2). See the usage in VP8LHistogramAdd().
  assert(size >= 32);
  assert(size % 2 == 0);

  do {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]);
    const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]);
    const __m256i a2 = _mm256_loadu_si256((const __m256i*)&a[i + 16]);
    const __m256i a3 = _mm256_loadu_si256((const __m256i*)&a[i + 24]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&b[i + 0]);
    const __m256i b1 = _mm256_loadu_si256((const __m256i*)&b[i + 8]);
    const __m256i b2 = _mm256_loadu_si256((const __m256i*)&b[i + 16]);
    const __m256i b3 = _mm256_loadu_si256((const __m256i*)&b[i + 24]);
    _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0));
    _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1));
    _mm256_storeu_si256((__m256i*)&out[i + 16], _mm256_add_epi32(a2, b2));
    _mm256_storeu_si256((__m256i*)&out[i + 24], _mm256_add_epi32(a3, b3));
    i += 32;
  } while (i != aligned_size);

  if ((size & 16) != 0) {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]);
    const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&b[i + 0]);
    const __m256i b1 = _mm256_loadu_si256((const __m256i*)&b[i + 8]);
    _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0));
    _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1));
    i += 16;
  }

  size &= 15;
  if (size == 8) {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&b[i]);
    _mm256_storeu_si256((__m256i*)&out[i], _mm256_add_epi32(a0, b0));
  } else {
    for (; size--; ++i) {
      out[i] = a[i] + b[i];
    }
  }
}
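
// Tail handling above decomposes size as 32 * k + (16 or 0) + (8 or a short
// scalar remainder); e.g. size == 40 runs one 32-wide pass, skips the 16-wide
// block and finishes with a single 8-wide add. AddVectorEq_AVX2 below uses
// the same decomposition with out as the second operand.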

static void AddVectorEq_AVX2(const uint32_t* WEBP_RESTRICT a,
                             uint32_t* WEBP_RESTRICT out, int size) {
  int i = 0;
  int aligned_size = size & ~31;
  // Size is, at minimum, NUM_DISTANCE_CODES (40) and may be as large as
  // NUM_LITERAL_CODES (256) + NUM_LENGTH_CODES (24) + (0 or a non-zero power of
  // 2). See the usage in VP8LHistogramAdd().
  assert(size >= 32);
  assert(size % 2 == 0);

  do {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]);
    const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]);
    const __m256i a2 = _mm256_loadu_si256((const __m256i*)&a[i + 16]);
    const __m256i a3 = _mm256_loadu_si256((const __m256i*)&a[i + 24]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&out[i + 0]);
    const __m256i b1 = _mm256_loadu_si256((const __m256i*)&out[i + 8]);
    const __m256i b2 = _mm256_loadu_si256((const __m256i*)&out[i + 16]);
    const __m256i b3 = _mm256_loadu_si256((const __m256i*)&out[i + 24]);
    _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0));
    _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1));
    _mm256_storeu_si256((__m256i*)&out[i + 16], _mm256_add_epi32(a2, b2));
    _mm256_storeu_si256((__m256i*)&out[i + 24], _mm256_add_epi32(a3, b3));
    i += 32;
  } while (i != aligned_size);

  if ((size & 16) != 0) {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]);
    const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&out[i + 0]);
    const __m256i b1 = _mm256_loadu_si256((const __m256i*)&out[i + 8]);
    _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0));
    _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1));
    i += 16;
  }

  size &= 15;
  if (size == 8) {
    const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i]);
    const __m256i b0 = _mm256_loadu_si256((const __m256i*)&out[i]);
    _mm256_storeu_si256((__m256i*)&out[i], _mm256_add_epi32(a0, b0));
  } else {
    for (; size--; ++i) {
      out[i] += a[i];
    }
  }
}

//------------------------------------------------------------------------------
// Entropy

#if !defined(WEBP_HAVE_SLOW_CLZ_CTZ)

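// The vector part below only builds 32-bit masks of the non-zero histogram
// entries (saturating-pack each count to a byte, compare against zero,
// movemask); the VP8LFastSLog2 work then runs in scalar code, once per set
// bit. Note that, as named, sumX actually accumulates X[i] + Y[i] and sumXY
// accumulates X[i]; the final combination is symmetric in the two totals, so
// the returned value is unaffected.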
static uint64_t CombinedShannonEntropy_AVX2(const uint32_t X[256],
                                            const uint32_t Y[256]) {
  int i;
  uint64_t retval = 0;
  uint32_t sumX = 0, sumXY = 0;
  const __m256i zero = _mm256_setzero_si256();

  for (i = 0; i < 256; i += 32) {
    const __m256i x0 = _mm256_loadu_si256((const __m256i*)(X + i + 0));
    const __m256i y0 = _mm256_loadu_si256((const __m256i*)(Y + i + 0));
    const __m256i x1 = _mm256_loadu_si256((const __m256i*)(X + i + 8));
    const __m256i y1 = _mm256_loadu_si256((const __m256i*)(Y + i + 8));
    const __m256i x2 = _mm256_loadu_si256((const __m256i*)(X + i + 16));
    const __m256i y2 = _mm256_loadu_si256((const __m256i*)(Y + i + 16));
    const __m256i x3 = _mm256_loadu_si256((const __m256i*)(X + i + 24));
    const __m256i y3 = _mm256_loadu_si256((const __m256i*)(Y + i + 24));
    const __m256i x4 = _mm256_packs_epi16(_mm256_packs_epi32(x0, x1),
                                          _mm256_packs_epi32(x2, x3));
    const __m256i y4 = _mm256_packs_epi16(_mm256_packs_epi32(y0, y1),
                                          _mm256_packs_epi32(y2, y3));
    // Packed pixels are actually in order: ... 17 16 12 11 10 9 8 3 2 1 0
    const __m256i x5 = _mm256_permutevar8x32_epi32(
        x4, _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0));
    const __m256i y5 = _mm256_permutevar8x32_epi32(
        y4, _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0));
    const uint32_t mx =
        (uint32_t)_mm256_movemask_epi8(_mm256_cmpgt_epi8(x5, zero));
    uint32_t my =
        (uint32_t)_mm256_movemask_epi8(_mm256_cmpgt_epi8(y5, zero)) | mx;
    while (my) {
      const int32_t j = BitsCtz(my);
      uint32_t xy;
      if ((mx >> j) & 1) {
        const int x = X[i + j];
        sumXY += x;
        retval += VP8LFastSLog2(x);
      }
      xy = X[i + j] + Y[i + j];
      sumX += xy;
      retval += VP8LFastSLog2(xy);
      my &= my - 1;
    }
  }
  retval = VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY) - retval;
  return retval;
}

#else

#define DONT_USE_COMBINED_SHANNON_ENTROPY_SSE2_FUNC   // won't be faster

#endif

//------------------------------------------------------------------------------

static int VectorMismatch_AVX2(const uint32_t* const array1,
                               const uint32_t* const array2, int length) {
  int match_len;

  if (length >= 24) {
    __m256i A0 = _mm256_loadu_si256((const __m256i*)&array1[0]);
    __m256i A1 = _mm256_loadu_si256((const __m256i*)&array2[0]);
    match_len = 0;
    do {
      // Loop unrolling and early load both provide a speedup of 10% for the
      // current function. Also, max_limit can be MAX_LENGTH=4096 at most.
      const __m256i cmpA = _mm256_cmpeq_epi32(A0, A1);
      const __m256i B0 =
          _mm256_loadu_si256((const __m256i*)&array1[match_len + 8]);
      const __m256i B1 =
          _mm256_loadu_si256((const __m256i*)&array2[match_len + 8]);
      if ((uint32_t)_mm256_movemask_epi8(cmpA) != 0xffffffff) break;
      match_len += 8;

      {
        const __m256i cmpB = _mm256_cmpeq_epi32(B0, B1);
        A0 = _mm256_loadu_si256((const __m256i*)&array1[match_len + 8]);
        A1 = _mm256_loadu_si256((const __m256i*)&array2[match_len + 8]);
        if ((uint32_t)_mm256_movemask_epi8(cmpB) != 0xffffffff) break;
        match_len += 8;
      }
    } while (match_len + 24 < length);
  } else {
    match_len = 0;
    // Unroll the potential first two loops.
    if (length >= 8 &&
        (uint32_t)_mm256_movemask_epi8(_mm256_cmpeq_epi32(
            _mm256_loadu_si256((const __m256i*)&array1[0]),
            _mm256_loadu_si256((const __m256i*)&array2[0]))) == 0xffffffff) {
      match_len = 8;
      if (length >= 16 &&
          (uint32_t)_mm256_movemask_epi8(_mm256_cmpeq_epi32(
              _mm256_loadu_si256((const __m256i*)&array1[8]),
              _mm256_loadu_si256((const __m256i*)&array2[8]))) == 0xffffffff) {
        match_len = 16;
      }
    }
  }

  while (match_len < length && array1[match_len] == array2[match_len]) {
    ++match_len;
  }
  return match_len;
}
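
// Illustrative behavior (not from the original source): the function returns
// the length of the longest common prefix of the two arrays, e.g.
//   array1 = {7, 7, 1, ...}, array2 = {7, 7, 2, ...}  ->  2
// which is what the encoder's LZ77 match-length search needs.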

// Bundles multiple (1, 2, 4 or 8) pixels into a single pixel.
static void BundleColorMap_AVX2(const uint8_t* WEBP_RESTRICT const row,
                                int width, int xbits,
                                uint32_t* WEBP_RESTRICT dst) {
  int x = 0;
  assert(xbits >= 0);
  assert(xbits <= 3);
  switch (xbits) {
    case 0: {
      const __m256i ff = _mm256_set1_epi16((short)0xff00);
      const __m256i zero = _mm256_setzero_si256();
      // Store 0xff000000 | (row[x] << 8).
      for (x = 0; x + 32 <= width; x += 32, dst += 32) {
        const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]);
        const __m256i in_lo = _mm256_unpacklo_epi8(zero, in);
        const __m256i dst0 = _mm256_unpacklo_epi16(in_lo, ff);
        const __m256i dst1 = _mm256_unpackhi_epi16(in_lo, ff);
        const __m256i in_hi = _mm256_unpackhi_epi8(zero, in);
        const __m256i dst2 = _mm256_unpacklo_epi16(in_hi, ff);
        const __m256i dst3 = _mm256_unpackhi_epi16(in_hi, ff);
        _mm256_storeu2_m128i((__m128i*)&dst[16], (__m128i*)&dst[0], dst0);
        _mm256_storeu2_m128i((__m128i*)&dst[20], (__m128i*)&dst[4], dst1);
        _mm256_storeu2_m128i((__m128i*)&dst[24], (__m128i*)&dst[8], dst2);
        _mm256_storeu2_m128i((__m128i*)&dst[28], (__m128i*)&dst[12], dst3);
      }
      break;
    }
    case 1: {
      const __m256i ff = _mm256_set1_epi16((short)0xff00);
      const __m256i mul = _mm256_set1_epi16(0x110);
      for (x = 0; x + 32 <= width; x += 32, dst += 16) {
        // 0a0b | (where a/b are 4 bits).
        const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]);
        const __m256i tmp = _mm256_mullo_epi16(in, mul);  // aba0
        const __m256i pack = _mm256_and_si256(tmp, ff);   // ab00
        const __m256i dst0 = _mm256_unpacklo_epi16(pack, ff);
        const __m256i dst1 = _mm256_unpackhi_epi16(pack, ff);
        _mm256_storeu2_m128i((__m128i*)&dst[8], (__m128i*)&dst[0], dst0);
        _mm256_storeu2_m128i((__m128i*)&dst[12], (__m128i*)&dst[4], dst1);
      }
      break;
    }
    case 2: {
      const __m256i mask_or = _mm256_set1_epi32((int)0xff000000);
      const __m256i mul_cst = _mm256_set1_epi16(0x0104);
      const __m256i mask_mul = _mm256_set1_epi16(0x0f00);
      for (x = 0; x + 32 <= width; x += 32, dst += 8) {
        // 000a000b000c000d | (where a/b/c/d are 2 bits).
        const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]);
        const __m256i mul =
            _mm256_mullo_epi16(in, mul_cst);  // 00ab00b000cd00d0
        const __m256i tmp =
            _mm256_and_si256(mul, mask_mul);               // 00ab000000cd0000
        const __m256i shift = _mm256_srli_epi32(tmp, 12);  // 00000000ab000000
        const __m256i pack = _mm256_or_si256(shift, tmp);  // 00000000abcd0000
        // Convert to 0xff00**00.
        const __m256i res = _mm256_or_si256(pack, mask_or);
        _mm256_storeu_si256((__m256i*)dst, res);
      }
      break;
    }
    default: {
      assert(xbits == 3);
      for (x = 0; x + 32 <= width; x += 32, dst += 4) {
        // 0000000a0000000b... | (where a/b are 1 bit).
        const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]);
        const __m256i shift = _mm256_slli_epi64(in, 7);
        const uint32_t move = _mm256_movemask_epi8(shift);
        dst[0] = 0xff000000 | ((move & 0xff) << 8);
        dst[1] = 0xff000000 | (move & 0xff00);
        dst[2] = 0xff000000 | ((move & 0xff0000) >> 8);
        dst[3] = 0xff000000 | ((move & 0xff000000) >> 16);
      }
      break;
    }
  }
  if (x != width) {
    VP8LBundleColorMap_SSE(row + x, width - x, xbits, dst);
  }
}
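
// Packing rule implemented above, as a sketch: 1 << xbits palette indices of
// (8 >> xbits) bits each are bundled into the green channel of one output
// pixel, with alpha forced to 0xff and the first index in the low bits. For
// xbits == 3, eight 1-bit indices h..a become 0xff000000 | (0bhgfedcba << 8).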

//------------------------------------------------------------------------------
// Batch version of Predictor Transform subtraction

static WEBP_INLINE void Average2_m256i(const __m256i* const a0,
                                       const __m256i* const a1,
                                       __m256i* const avg) {
  // (a + b) >> 1 = ((a + b + 1) >> 1) - ((a ^ b) & 1)
  const __m256i ones = _mm256_set1_epi8(1);
  const __m256i avg1 = _mm256_avg_epu8(*a0, *a1);
  const __m256i one = _mm256_and_si256(_mm256_xor_si256(*a0, *a1), ones);
  *avg = _mm256_sub_epi8(avg1, one);
}
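
// Worked example of the identity above: _mm256_avg_epu8 rounds up, computing
// (a + b + 1) >> 1, and (a ^ b) & 1 is set exactly when a + b is odd, so the
// subtraction yields the truncated (a + b) >> 1. E.g. a = 1, b = 2:
// avg_epu8 = 2, (1 ^ 2) & 1 = 1, result 1 == (1 + 2) >> 1.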

// Predictor0: ARGB_BLACK.
static void PredictorSub0_AVX2(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  const __m256i black = _mm256_set1_epi32((int)ARGB_BLACK);
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    const __m256i res = _mm256_sub_epi8(src, black);
    _mm256_storeu_si256((__m256i*)&out[i], res);
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[0](in + i, NULL, num_pixels - i, out + i);
  }
  (void)upper;
}

#define GENERATE_PREDICTOR_1(X, IN)                                          \
  static void PredictorSub##X##_AVX2(                                        \
      const uint32_t* const in, const uint32_t* const upper, int num_pixels, \
      uint32_t* WEBP_RESTRICT const out) {                                   \
    int i;                                                                   \
    for (i = 0; i + 8 <= num_pixels; i += 8) {                               \
      const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);        \
      const __m256i pred = _mm256_loadu_si256((const __m256i*)&(IN));        \
      const __m256i res = _mm256_sub_epi8(src, pred);                        \
      _mm256_storeu_si256((__m256i*)&out[i], res);                           \
    }                                                                        \
    if (i != num_pixels) {                                                   \
      VP8LPredictorsSub_SSE[(X)](in + i, WEBP_OFFSET_PTR(upper, i),          \
                                 num_pixels - i, out + i);                   \
    }                                                                        \
  }

GENERATE_PREDICTOR_1(1, in[i - 1])       // Predictor1: L
GENERATE_PREDICTOR_1(2, upper[i])        // Predictor2: T
GENERATE_PREDICTOR_1(3, upper[i + 1])    // Predictor3: TR
GENERATE_PREDICTOR_1(4, upper[i - 1])    // Predictor4: TL
#undef GENERATE_PREDICTOR_1

// Predictor5: avg2(avg2(L, TR), T)
static void PredictorSub5_AVX2(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]);
    const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]);
    const __m256i TR = _mm256_loadu_si256((const __m256i*)&upper[i + 1]);
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    __m256i avg, pred, res;
    Average2_m256i(&L, &TR, &avg);
    Average2_m256i(&avg, &T, &pred);
    res = _mm256_sub_epi8(src, pred);
    _mm256_storeu_si256((__m256i*)&out[i], res);
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[5](in + i, upper + i, num_pixels - i, out + i);
  }
}

#define GENERATE_PREDICTOR_2(X, A, B)                                         \
  static void PredictorSub##X##_AVX2(const uint32_t* in,                      \
                                     const uint32_t* upper, int num_pixels,   \
                                     uint32_t* WEBP_RESTRICT out) {           \
    int i;                                                                    \
    for (i = 0; i + 8 <= num_pixels; i += 8) {                                \
      const __m256i tA = _mm256_loadu_si256((const __m256i*)&(A));            \
      const __m256i tB = _mm256_loadu_si256((const __m256i*)&(B));            \
      const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);         \
      __m256i pred, res;                                                      \
      Average2_m256i(&tA, &tB, &pred);                                        \
      res = _mm256_sub_epi8(src, pred);                                       \
      _mm256_storeu_si256((__m256i*)&out[i], res);                            \
    }                                                                         \
    if (i != num_pixels) {                                                    \
      VP8LPredictorsSub_SSE[(X)](in + i, upper + i, num_pixels - i, out + i); \
    }                                                                         \
  }

GENERATE_PREDICTOR_2(6, in[i - 1], upper[i - 1])   // Predictor6: avg(L, TL)
GENERATE_PREDICTOR_2(7, in[i - 1], upper[i])       // Predictor7: avg(L, T)
GENERATE_PREDICTOR_2(8, upper[i - 1], upper[i])    // Predictor8: avg(TL, T)
GENERATE_PREDICTOR_2(9, upper[i], upper[i + 1])    // Predictor9: average(T, TR)
#undef GENERATE_PREDICTOR_2

// Predictor10: avg(avg(L,TL), avg(T, TR)).
static void PredictorSub10_AVX2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]);
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]);
    const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]);
    const __m256i TR = _mm256_loadu_si256((const __m256i*)&upper[i + 1]);
    __m256i avgTTR, avgLTL, avg, res;
    Average2_m256i(&T, &TR, &avgTTR);
    Average2_m256i(&L, &TL, &avgLTL);
    Average2_m256i(&avgTTR, &avgLTL, &avg);
    res = _mm256_sub_epi8(src, avg);
    _mm256_storeu_si256((__m256i*)&out[i], res);
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[10](in + i, upper + i, num_pixels - i, out + i);
  }
}

// Predictor11: select.
static void GetSumAbsDiff32_AVX2(const __m256i* const A, const __m256i* const B,
                                 __m256i* const out) {
  // We can unpack with any value on the upper 32 bits, provided it's the same
  // on both operands (so that their sum of abs diff is zero). Here we use *A.
  const __m256i A_lo = _mm256_unpacklo_epi32(*A, *A);
  const __m256i B_lo = _mm256_unpacklo_epi32(*B, *A);
  const __m256i A_hi = _mm256_unpackhi_epi32(*A, *A);
  const __m256i B_hi = _mm256_unpackhi_epi32(*B, *A);
  const __m256i s_lo = _mm256_sad_epu8(A_lo, B_lo);
  const __m256i s_hi = _mm256_sad_epu8(A_hi, B_hi);
  *out = _mm256_packs_epi32(s_lo, s_hi);
}
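
// Why the unpacking above works: _mm256_sad_epu8 yields one sum of absolute
// byte differences per 64-bit group, and pairing each 32-bit pixel with the
// same value (*A) on both sides makes the extra four bytes cancel, so every
// 64-bit group holds the SAD of exactly one pixel's four channels.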

static void PredictorSub11_AVX2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]);
    const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]);
    const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]);
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    __m256i pa, pb;
    GetSumAbsDiff32_AVX2(&T, &TL, &pa);  // pa = sum |T-TL|
    GetSumAbsDiff32_AVX2(&L, &TL, &pb);  // pb = sum |L-TL|
    {
      const __m256i mask = _mm256_cmpgt_epi32(pb, pa);
      const __m256i A = _mm256_and_si256(mask, L);
      const __m256i B = _mm256_andnot_si256(mask, T);
      const __m256i pred = _mm256_or_si256(A, B);  // pred = (pb > pa) ? L : T
      const __m256i res = _mm256_sub_epi8(src, pred);
      _mm256_storeu_si256((__m256i*)&out[i], res);
    }
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[11](in + i, upper + i, num_pixels - i, out + i);
  }
}

// Predictor12: ClampedAddSubtractFull.
static void PredictorSub12_AVX2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  const __m256i zero = _mm256_setzero_si256();
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]);
    const __m256i L_lo = _mm256_unpacklo_epi8(L, zero);
    const __m256i L_hi = _mm256_unpackhi_epi8(L, zero);
    const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]);
    const __m256i T_lo = _mm256_unpacklo_epi8(T, zero);
    const __m256i T_hi = _mm256_unpackhi_epi8(T, zero);
    const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]);
    const __m256i TL_lo = _mm256_unpacklo_epi8(TL, zero);
    const __m256i TL_hi = _mm256_unpackhi_epi8(TL, zero);
    const __m256i diff_lo = _mm256_sub_epi16(T_lo, TL_lo);
    const __m256i diff_hi = _mm256_sub_epi16(T_hi, TL_hi);
    const __m256i pred_lo = _mm256_add_epi16(L_lo, diff_lo);
    const __m256i pred_hi = _mm256_add_epi16(L_hi, diff_hi);
    const __m256i pred = _mm256_packus_epi16(pred_lo, pred_hi);
    const __m256i res = _mm256_sub_epi8(src, pred);
    _mm256_storeu_si256((__m256i*)&out[i], res);
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[12](in + i, upper + i, num_pixels - i, out + i);
  }
}
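
// In PredictorSub12 above, widening to 16 bits keeps L + (T - TL) exact in
// [-255, 510] per channel, and _mm256_packus_epi16 saturates the result back
// to [0, 255]: together they implement the clamped add-subtract-full
// predictor without per-channel branches.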

// Predictor13: ClampedAddSubtractHalf
static void PredictorSub13_AVX2(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* WEBP_RESTRICT out) {
  int i;
  const __m256i zero = _mm256_setzero_si256();
  for (i = 0; i + 8 <= num_pixels; i += 8) {
    const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]);
    const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]);
    const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]);
    const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]);
    // lo.
    const __m256i L_lo = _mm256_unpacklo_epi8(L, zero);
    const __m256i T_lo = _mm256_unpacklo_epi8(T, zero);
    const __m256i TL_lo = _mm256_unpacklo_epi8(TL, zero);
    const __m256i sum_lo = _mm256_add_epi16(T_lo, L_lo);
    const __m256i avg_lo = _mm256_srli_epi16(sum_lo, 1);
    const __m256i A1_lo = _mm256_sub_epi16(avg_lo, TL_lo);
    const __m256i bit_fix_lo = _mm256_cmpgt_epi16(TL_lo, avg_lo);
    const __m256i A2_lo = _mm256_sub_epi16(A1_lo, bit_fix_lo);
    const __m256i A3_lo = _mm256_srai_epi16(A2_lo, 1);
    const __m256i A4_lo = _mm256_add_epi16(avg_lo, A3_lo);
    // hi.
    const __m256i L_hi = _mm256_unpackhi_epi8(L, zero);
    const __m256i T_hi = _mm256_unpackhi_epi8(T, zero);
    const __m256i TL_hi = _mm256_unpackhi_epi8(TL, zero);
    const __m256i sum_hi = _mm256_add_epi16(T_hi, L_hi);
    const __m256i avg_hi = _mm256_srli_epi16(sum_hi, 1);
    const __m256i A1_hi = _mm256_sub_epi16(avg_hi, TL_hi);
    const __m256i bit_fix_hi = _mm256_cmpgt_epi16(TL_hi, avg_hi);
    const __m256i A2_hi = _mm256_sub_epi16(A1_hi, bit_fix_hi);
    const __m256i A3_hi = _mm256_srai_epi16(A2_hi, 1);
    const __m256i A4_hi = _mm256_add_epi16(avg_hi, A3_hi);

    const __m256i pred = _mm256_packus_epi16(A4_lo, A4_hi);
    const __m256i res = _mm256_sub_epi8(src, pred);
    _mm256_storeu_si256((__m256i*)&out[i], res);
  }
  if (i != num_pixels) {
    VP8LPredictorsSub_SSE[13](in + i, upper + i, num_pixels - i, out + i);
  }
}

//------------------------------------------------------------------------------
// Entry point

extern void VP8LEncDspInitAVX2(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitAVX2(void) {
  VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed_AVX2;
  VP8LTransformColor = TransformColor_AVX2;
  VP8LCollectColorBlueTransforms = CollectColorBlueTransforms_AVX2;
  VP8LCollectColorRedTransforms = CollectColorRedTransforms_AVX2;
  VP8LAddVector = AddVector_AVX2;
  VP8LAddVectorEq = AddVectorEq_AVX2;
#if !defined(DONT_USE_COMBINED_SHANNON_ENTROPY_SSE2_FUNC)
  VP8LCombinedShannonEntropy = CombinedShannonEntropy_AVX2;
#endif
  VP8LVectorMismatch = VectorMismatch_AVX2;
  VP8LBundleColorMap = BundleColorMap_AVX2;

  VP8LPredictorsSub[0] = PredictorSub0_AVX2;
  VP8LPredictorsSub[1] = PredictorSub1_AVX2;
  VP8LPredictorsSub[2] = PredictorSub2_AVX2;
  VP8LPredictorsSub[3] = PredictorSub3_AVX2;
  VP8LPredictorsSub[4] = PredictorSub4_AVX2;
  VP8LPredictorsSub[5] = PredictorSub5_AVX2;
  VP8LPredictorsSub[6] = PredictorSub6_AVX2;
  VP8LPredictorsSub[7] = PredictorSub7_AVX2;
  VP8LPredictorsSub[8] = PredictorSub8_AVX2;
  VP8LPredictorsSub[9] = PredictorSub9_AVX2;
  VP8LPredictorsSub[10] = PredictorSub10_AVX2;
  VP8LPredictorsSub[11] = PredictorSub11_AVX2;
  VP8LPredictorsSub[12] = PredictorSub12_AVX2;
  VP8LPredictorsSub[13] = PredictorSub13_AVX2;
  VP8LPredictorsSub[14] = PredictorSub0_AVX2;  // <- padding security sentinels
  VP8LPredictorsSub[15] = PredictorSub0_AVX2;
}

#else  // !WEBP_USE_AVX2

WEBP_DSP_INIT_STUB(VP8LEncDspInitAVX2)

#endif  // WEBP_USE_AVX2