pickrst_avx2.c
/*
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <immintrin.h>  // AVX2
#include "aom_dsp/x86/mem_sse2.h"
#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/x86/synonyms_avx2.h"
#include "aom_dsp/x86/transpose_sse2.h"

#include "config/av1_rtcd.h"
#include "av1/common/restoration.h"
#include "av1/encoder/pickrst.h"

#if CONFIG_AV1_HIGHBITDEPTH
static inline void acc_stat_highbd_avx2(int64_t *dst, const uint16_t *dgd,
                                        const __m256i *shuffle,
                                        const __m256i *dgd_ijkl) {
  // Load two 128-bit chunks from dgd
  const __m256i s0 = _mm256_inserti128_si256(
      _mm256_castsi128_si256(_mm_loadu_si128((__m128i *)dgd)),
      _mm_loadu_si128((__m128i *)(dgd + 4)), 1);
  // s0 = [11 10 9 8 7 6 5 4] [7 6 5 4 3 2 1 0] as u16 (values are dgd indices)
  // The weird order is so the shuffle stays within 128-bit lanes

  // Shuffle 16x u16 values within lanes according to the mask:
  // [0 1 1 2 2 3 3 4] [0 1 1 2 2 3 3 4]
  // (Actually we shuffle u8 values as there's no 16-bit shuffle)
  const __m256i s1 = _mm256_shuffle_epi8(s0, *shuffle);
  // s1 = [8 7 7 6 6 5 5 4] [4 3 3 2 2 1 1 0] as u16 (values are dgd indices)

  // Multiply 16x 16-bit integers in dgd_ijkl and s1, resulting in 16x 32-bit
  // integers then horizontally add pairs of these integers resulting in 8x
  // 32-bit integers
  const __m256i d0 = _mm256_madd_epi16(*dgd_ijkl, s1);
  // d0 = [a b c d] [e f g h] as u32

  // Take the lower-half of d0, extend to u64, add it on to dst (H)
  const __m256i d0l = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(d0, 0));
  // d0l = [a b] [c d] as u64
  const __m256i dst0 = yy_load_256(dst);
  yy_store_256(dst, _mm256_add_epi64(d0l, dst0));

  // Take the upper-half of d0, extend to u64, add it on to dst (H)
  const __m256i d0h = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(d0, 1));
  // d0h = [e f] [g h] as u64
  const __m256i dst1 = yy_load_256(dst + 4);
  yy_store_256(dst + 4, _mm256_add_epi64(d0h, dst1));
}
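// For reference, a scalar sketch of what acc_stat_highbd_avx2 accumulates
// (this helper and its last two parameters are illustrative only and are not
// part of libaom): with the two pixels x = dgd_ijk[l] and y = dgd_ijk[l + 1]
// broadcast into *dgd_ijkl by the callers below, each of the 8 output slots
// gains x * dgd[m] + y * dgd[m + 1].
static inline void acc_stat_highbd_scalar_sketch(int64_t *dst,
                                                 const uint16_t *dgd,
                                                 uint16_t x, uint16_t y) {
  for (int m = 0; m < 8; m++) {
    dst[m] += (int64_t)x * dgd[m] + (int64_t)y * dgd[m + 1];
  }
}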
static inline void acc_stat_highbd_win7_one_line_avx2(
    const uint16_t *dgd, const uint16_t *src, int h_start, int h_end,
    int dgd_stride, const __m256i *shuffle, int32_t *sumX,
    int32_t sumY[WIENER_WIN][WIENER_WIN],
    int64_t M_int[WIENER_WIN][WIENER_WIN],
    int64_t H_int[WIENER_WIN2][WIENER_WIN * 8]) {
  int j, k, l;
  const int wiener_win = WIENER_WIN;
  // Main loop handles two pixels at a time
  // We can assume that h_start is even, since it will always be aligned to
  // a tile edge + some number of restoration units, and both of those will
  // be 64-pixel aligned.
  // However, at the edge of the image, h_end may be odd, so we need to handle
  // that case correctly.
  assert(h_start % 2 == 0);
  const int h_end_even = h_end & ~1;
  const int has_odd_pixel = h_end & 1;
  for (j = h_start; j < h_end_even; j += 2) {
    const uint16_t X1 = src[j];
    const uint16_t X2 = src[j + 1];
    *sumX += X1 + X2;
    const uint16_t *dgd_ij = dgd + j;
    for (k = 0; k < wiener_win; k++) {
      const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
      for (l = 0; l < wiener_win; l++) {
        int64_t *H_ = &H_int[(l * wiener_win + k)][0];
        const uint16_t D1 = dgd_ijk[l];
        const uint16_t D2 = dgd_ijk[l + 1];
        sumY[k][l] += D1 + D2;
        M_int[k][l] += D1 * X1 + D2 * X2;

        // Load two u16 values from dgd_ijkl combined as a u32,
        // then broadcast to 8x u32 slots of a 256
        const __m256i dgd_ijkl = _mm256_set1_epi32(loadu_int32(dgd_ijk + l));
        // dgd_ijkl = [y x y x y x y x] [y x y x y x y x] where each is a u16

        acc_stat_highbd_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle,
                             &dgd_ijkl);
      }
    }
  }
  // If the width is odd, add in the final pixel
  if (has_odd_pixel) {
    const uint16_t X1 = src[j];
    *sumX += X1;
    const uint16_t *dgd_ij = dgd + j;
    for (k = 0; k < wiener_win; k++) {
      const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
      for (l = 0; l < wiener_win; l++) {
        int64_t *H_ = &H_int[(l * wiener_win + k)][0];
        const uint16_t D1 = dgd_ijk[l];
        sumY[k][l] += D1;
        M_int[k][l] += D1 * X1;

        // The `acc_stat_highbd_avx2` function wants its input to have
        // interleaved copies of two pixels, but we only have one. However, the
        // pixels are (effectively) used as inputs to a multiply-accumulate. So
        // if we set the extra pixel slot to 0, then it is effectively ignored.
        const __m256i dgd_ijkl = _mm256_set1_epi32((int)D1);

        acc_stat_highbd_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle,
                             &dgd_ijkl);
      }
    }
  }
}

static inline void compute_stats_highbd_win7_opt_avx2(
    const uint8_t *dgd8, const uint8_t *src8, int h_start, int h_end,
    int v_start, int v_end, int dgd_stride, int src_stride, int64_t *M,
    int64_t *H, aom_bit_depth_t bit_depth) {
  int i, j, k, l, m, n;
  const int wiener_win = WIENER_WIN;
  const int pixel_count = (h_end - h_start) * (v_end - v_start);
  const int wiener_win2 = wiener_win * wiener_win;
  const int wiener_halfwin = (wiener_win >> 1);
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
  const uint16_t avg =
      find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);

  int64_t M_int[WIENER_WIN][WIENER_WIN] = { { 0 } };
  DECLARE_ALIGNED(32, int64_t, H_int[WIENER_WIN2][WIENER_WIN * 8]) = { { 0 } };
  int32_t sumY[WIENER_WIN][WIENER_WIN] = { { 0 } };
  int32_t sumX = 0;
  const uint16_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;

  const __m256i shuffle = yy_loadu_256(g_shuffle_stats_highbd_data);
  for (j = v_start; j < v_end; j += 64) {
    const int vert_end = AOMMIN(64, v_end - j) + j;
    for (i = j; i < vert_end; i++) {
      acc_stat_highbd_win7_one_line_avx2(
          dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
          dgd_stride, &shuffle, &sumX, sumY, M_int, H_int);
    }
  }

  uint8_t bit_depth_divider = 1;
  if (bit_depth == AOM_BITS_12)
    bit_depth_divider = 16;
  else if (bit_depth == AOM_BITS_10)
    bit_depth_divider = 4;

  const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
  for (k = 0; k < wiener_win; k++) {
    for (l = 0; l < wiener_win; l++) {
      const int32_t idx0 = l * wiener_win + k;
      M[idx0] = (M_int[k][l] +
                 (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]))) /
                bit_depth_divider;
      int64_t *H_ = H + idx0 * wiener_win2;
      int64_t *H_int_ = &H_int[idx0][0];
      for (m = 0; m < wiener_win; m++) {
        for (n = 0; n < wiener_win; n++) {
          H_[m * wiener_win + n] =
              (H_int_[n * 8 + m] +
               (avg_square_sum - (int64_t)avg * (sumY[k][l] + sumY[n][m]))) /
              bit_depth_divider;
        }
      }
    }
  }
}

static inline void acc_stat_highbd_win5_one_line_avx2(
    const uint16_t *dgd, const uint16_t *src, int h_start, int h_end,
    int dgd_stride, const __m256i *shuffle, int32_t *sumX,
    int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
    int64_t M_int[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
    int64_t H_int[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) {
  int j, k, l;
  const int wiener_win = WIENER_WIN_CHROMA;
  // Main loop handles two pixels at a time
  // We can assume that h_start is even, since it will always be aligned to
  // a tile edge + some number of restoration units, and both of those will
  // be 64-pixel aligned.
  // However, at the edge of the image, h_end may be odd, so we need to handle
  // that case correctly.
  assert(h_start % 2 == 0);
  const int h_end_even = h_end & ~1;
  const int has_odd_pixel = h_end & 1;
  for (j = h_start; j < h_end_even; j += 2) {
    const uint16_t X1 = src[j];
    const uint16_t X2 = src[j + 1];
    *sumX += X1 + X2;
    const uint16_t *dgd_ij = dgd + j;
    for (k = 0; k < wiener_win; k++) {
      const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
      for (l = 0; l < wiener_win; l++) {
        int64_t *H_ = &H_int[(l * wiener_win + k)][0];
        const uint16_t D1 = dgd_ijk[l];
        const uint16_t D2 = dgd_ijk[l + 1];
        sumY[k][l] += D1 + D2;
        M_int[k][l] += D1 * X1 + D2 * X2;

        // Load two u16 values from dgd_ijkl combined as a u32,
        // then broadcast to 8x u32 slots of a 256
        const __m256i dgd_ijkl = _mm256_set1_epi32(loadu_int32(dgd_ijk + l));
        // dgd_ijkl = [x y x y x y x y] [x y x y x y x y] where each is a u16

        acc_stat_highbd_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
                             &dgd_ijkl);
      }
    }
  }
  // If the width is odd, add in the final pixel
  if (has_odd_pixel) {
    const uint16_t X1 = src[j];
    *sumX += X1;
    const uint16_t *dgd_ij = dgd + j;
    for (k = 0; k < wiener_win; k++) {
      const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
      for (l = 0; l < wiener_win; l++) {
        int64_t *H_ = &H_int[(l * wiener_win + k)][0];
        const uint16_t D1 = dgd_ijk[l];
        sumY[k][l] += D1;
        M_int[k][l] += D1 * X1;

        // The `acc_stat_highbd_avx2` function wants its input to have
        // interleaved copies of two pixels, but we only have one. However, the
        // pixels are (effectively) used as inputs to a multiply-accumulate. So
        // if we set the extra pixel slot to 0, then it is effectively ignored.
        const __m256i dgd_ijkl = _mm256_set1_epi32((int)D1);

        acc_stat_highbd_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
                             &dgd_ijkl);
      }
    }
  }
}

static inline void compute_stats_highbd_win5_opt_avx2(
    const uint8_t *dgd8, const uint8_t *src8, int h_start, int h_end,
    int v_start, int v_end, int dgd_stride, int src_stride, int64_t *M,
    int64_t *H, aom_bit_depth_t bit_depth) {
  int i, j, k, l, m, n;
  const int wiener_win = WIENER_WIN_CHROMA;
  const int pixel_count = (h_end - h_start) * (v_end - v_start);
  const int wiener_win2 = wiener_win * wiener_win;
  const int wiener_halfwin = (wiener_win >> 1);
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
  const uint16_t avg =
      find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);

  int64_t M_int64[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
  DECLARE_ALIGNED(
      32, int64_t,
      H_int64[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) = { { 0 } };
  int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
  int32_t sumX = 0;
  const uint16_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;

  const __m256i shuffle = yy_loadu_256(g_shuffle_stats_highbd_data);
  for (j = v_start; j < v_end; j += 64) {
    const int vert_end = AOMMIN(64, v_end - j) + j;
    for (i = j; i < vert_end; i++) {
      acc_stat_highbd_win5_one_line_avx2(
          dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
          dgd_stride, &shuffle, &sumX, sumY, M_int64, H_int64);
    }
  }

  uint8_t bit_depth_divider = 1;
  if (bit_depth == AOM_BITS_12)
    bit_depth_divider = 16;
  else if (bit_depth == AOM_BITS_10)
    bit_depth_divider = 4;

  const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
  for (k = 0; k < wiener_win; k++) {
    for (l = 0; l < wiener_win; l++) {
      const int32_t idx0 = l * wiener_win + k;
      M[idx0] = (M_int64[k][l] +
                 (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]))) /
                bit_depth_divider;
      int64_t *H_ = H + idx0 * wiener_win2;
      int64_t *H_int_ = &H_int64[idx0][0];
      for (m = 0; m < wiener_win; m++) {
        for (n = 0; n < wiener_win; n++) {
          H_[m * wiener_win + n] =
              (H_int_[n * 8 + m] +
               (avg_square_sum - (int64_t)avg * (sumY[k][l] + sumY[n][m]))) /
              bit_depth_divider;
        }
      }
    }
  }
}

void av1_compute_stats_highbd_avx2(int wiener_win, const uint8_t *dgd8,
                                   const uint8_t *src8, int16_t *dgd_avg,
                                   int16_t *src_avg, int h_start, int h_end,
                                   int v_start, int v_end, int dgd_stride,
                                   int src_stride, int64_t *M, int64_t *H,
                                   aom_bit_depth_t bit_depth) {
  if (wiener_win == WIENER_WIN) {
    (void)dgd_avg;
    (void)src_avg;
    compute_stats_highbd_win7_opt_avx2(dgd8, src8, h_start, h_end, v_start,
                                       v_end, dgd_stride, src_stride, M, H,
                                       bit_depth);
  } else if (wiener_win == WIENER_WIN_CHROMA) {
    (void)dgd_avg;
    (void)src_avg;
    compute_stats_highbd_win5_opt_avx2(dgd8, src8, h_start, h_end, v_start,
                                       v_end, dgd_stride, src_stride, M, H,
                                       bit_depth);
  } else {
    av1_compute_stats_highbd_c(wiener_win, dgd8, src8, dgd_avg, src_avg,
                               h_start, h_end, v_start, v_end, dgd_stride,
                               src_stride, M, H, bit_depth);
  }
}
#endif  // CONFIG_AV1_HIGHBITDEPTH

static inline void madd_and_accum_avx2(__m256i src, __m256i dgd,
                                       __m256i *sum) {
  *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(src, dgd));
}

static inline __m256i convert_and_add_avx2(__m256i src) {
  const __m256i s0 = _mm256_cvtepi32_epi64(_mm256_castsi256_si128(src));
  const __m256i s1 = _mm256_cvtepi32_epi64(_mm256_extracti128_si256(src, 1));
  return _mm256_add_epi64(s0, s1);
}

static inline __m256i hadd_four_32_to_64_avx2(__m256i src0, __m256i src1,
                                              __m256i *src2, __m256i *src3) {
  // 00 01 10 11 02 03 12 13
  const __m256i s_0 = _mm256_hadd_epi32(src0, src1);
  // 20 21 30 31 22 23 32 33
  const __m256i s_1 = _mm256_hadd_epi32(*src2, *src3);
  // 00+01 10+11 20+21 30+31 02+03 12+13 22+23 32+33
  const __m256i s_2 = _mm256_hadd_epi32(s_0, s_1);
  return convert_and_add_avx2(s_2);
}

static inline __m128i add_64bit_lvl_avx2(__m256i src0, __m256i src1) {
  // 00 10 02 12
  const __m256i t0 = _mm256_unpacklo_epi64(src0, src1);
  // 01 11 03 13
  const __m256i t1 = _mm256_unpackhi_epi64(src0, src1);
  // 00+01 10+11 02+03 12+13
  const __m256i sum = _mm256_add_epi64(t0, t1);
  // 00+01 10+11
  const __m128i sum0 = _mm256_castsi256_si128(sum);
  // 02+03 12+13
  const __m128i sum1 = _mm256_extracti128_si256(sum, 1);
  // 00+01+02+03 10+11+12+13
  return _mm_add_epi64(sum0, sum1);
}

static inline __m128i convert_32_to_64_add_avx2(__m256i src0, __m256i src1) {
  // 00 01 02 03
  const __m256i s0 = convert_and_add_avx2(src0);
  // 10 11 12 13
  const __m256i s1 = convert_and_add_avx2(src1);
  return add_64bit_lvl_avx2(s0, s1);
}

static inline int32_t calc_sum_of_register(__m256i src) {
  const __m128i src_l = _mm256_castsi256_si128(src);
  const __m128i src_h = _mm256_extracti128_si256(src, 1);
  const __m128i sum = _mm_add_epi32(src_l, src_h);
  const __m128i dst0 = _mm_add_epi32(sum, _mm_srli_si128(sum, 8));
  const __m128i dst1 = _mm_add_epi32(dst0, _mm_srli_si128(dst0, 4));
  return _mm_cvtsi128_si32(dst1);
}

static inline void transpose_64bit_4x4_avx2(const __m256i *const src,
                                            __m256i *const dst) {
  // Unpack 64 bit elements. Goes from:
  // src[0]: 00 01 02 03
  // src[1]: 10 11 12 13
  // src[2]: 20 21 22 23
  // src[3]: 30 31 32 33
  // to:
  // reg0: 00 10 02 12
  // reg1: 20 30 22 32
  // reg2: 01 11 03 13
  // reg3: 21 31 23 33
  const __m256i reg0 = _mm256_unpacklo_epi64(src[0], src[1]);
  const __m256i reg1 = _mm256_unpacklo_epi64(src[2], src[3]);
  const __m256i reg2 = _mm256_unpackhi_epi64(src[0], src[1]);
  const __m256i reg3 = _mm256_unpackhi_epi64(src[2], src[3]);

  // Unpack 64 bit elements resulting in:
  // dst[0]: 00 10 20 30
  // dst[1]: 01 11 21 31
  // dst[2]: 02 12 22 32
  // dst[3]: 03 13 23 33
  dst[0] = _mm256_inserti128_si256(reg0, _mm256_castsi256_si128(reg1), 1);
  dst[1] = _mm256_inserti128_si256(reg2, _mm256_castsi256_si128(reg3), 1);
  dst[2] = _mm256_inserti128_si256(reg1, _mm256_extracti128_si256(reg0, 1), 0);
  dst[3] = _mm256_inserti128_si256(reg3, _mm256_extracti128_si256(reg2, 1), 0);
}

// When we load 32 values of int8_t type and need less than 32 values for
// processing, the below mask is used to make the extra values zero.
static const int8_t mask_8bit[32] = {
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,  // 16 bytes
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,   // 16 bytes
};

// When we load 16 values of int16_t type and need less than 16 values for
// processing, the below mask is used to make the extra values zero.
static const int16_t mask_16bit[32] = {
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,  // 16 values
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,   // 16 values
};

static inline uint8_t calc_dgd_buf_avg_avx2(const uint8_t *src,
                                            int32_t h_start, int32_t h_end,
                                            int32_t v_start, int32_t v_end,
                                            int32_t stride) {
  const uint8_t *src_temp = src + v_start * stride + h_start;
  const __m256i zero = _mm256_setzero_si256();
  const int32_t width = h_end - h_start;
  const int32_t height = v_end - v_start;
  const int32_t wd_beyond_mul32 = width & 31;
  const int32_t wd_mul32 = width - wd_beyond_mul32;
  __m128i mask_low, mask_high;
  __m256i ss = zero;

  // When the width is not a multiple of 32, 32 bytes are still loaded and the
  // extra (beyond required) values are zeroed using the mask below.
  if (wd_beyond_mul32 >= 16) {
    mask_low = _mm_set1_epi8(-1);
    mask_high = _mm_loadu_si128((__m128i *)(&mask_8bit[32 - wd_beyond_mul32]));
  } else {
    mask_low = _mm_loadu_si128((__m128i *)(&mask_8bit[16 - wd_beyond_mul32]));
    mask_high = _mm_setzero_si128();
  }
  const __m256i mask =
      _mm256_inserti128_si256(_mm256_castsi128_si256(mask_low), mask_high, 1);

  int32_t proc_ht = 0;
  do {
    // Process width in multiple of 32.
    int32_t proc_wd = 0;
    while (proc_wd < wd_mul32) {
      const __m256i s_0 = _mm256_loadu_si256((__m256i *)(src_temp + proc_wd));
      const __m256i sad_0 = _mm256_sad_epu8(s_0, zero);
      ss = _mm256_add_epi32(ss, sad_0);
      proc_wd += 32;
    }

    // Process the remaining width.
    if (wd_beyond_mul32) {
      const __m256i s_0 = _mm256_loadu_si256((__m256i *)(src_temp + proc_wd));
      const __m256i s_m_0 = _mm256_and_si256(s_0, mask);
      const __m256i sad_0 = _mm256_sad_epu8(s_m_0, zero);
      ss = _mm256_add_epi32(ss, sad_0);
    }
    src_temp += stride;
    proc_ht++;
  } while (proc_ht < height);

  const uint32_t sum = calc_sum_of_register(ss);
  const uint8_t avg = sum / (width * height);
  return avg;
}
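// A small illustrative sketch (this helper is not part of libaom): building
// the 16-byte tail mask for n valid bytes, exactly as the remainder handling
// in calc_dgd_buf_avg_avx2 above does. Loading 16 bytes from
// &mask_8bit[16 - n] yields n bytes of 0xff followed by (16 - n) bytes of
// 0x00, so ANDing with it keeps the n valid pixels and zeroes the rest. The
// same offset trick is used with mask_16bit for int16_t data below.
static inline __m128i tail_mask_8bit_sketch(int32_t n) {
  assert(n >= 0 && n < 16);
  return _mm_loadu_si128((__m128i *)(&mask_8bit[16 - n]));
}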
// Fill (src-avg) or (dgd-avg) buffers. Note that when n = (width % 16) is not
// 0, it writes (16 - n) more data than required.
static inline void sub_avg_block_avx2(const uint8_t *src, int32_t src_stride,
                                      uint8_t avg, int32_t width,
                                      int32_t height, int16_t *dst,
                                      int32_t dst_stride,
                                      int use_downsampled_wiener_stats) {
  const __m256i avg_reg = _mm256_set1_epi16(avg);

  int32_t proc_ht = 0;
  do {
    int ds_factor =
        use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
    if (use_downsampled_wiener_stats &&
        (height - proc_ht < WIENER_STATS_DOWNSAMPLE_FACTOR)) {
      ds_factor = height - proc_ht;
    }

    int32_t proc_wd = 0;
    while (proc_wd < width) {
      const __m128i s = _mm_loadu_si128((__m128i *)(src + proc_wd));
      const __m256i ss = _mm256_cvtepu8_epi16(s);
      const __m256i d = _mm256_sub_epi16(ss, avg_reg);
      _mm256_storeu_si256((__m256i *)(dst + proc_wd), d);
      proc_wd += 16;
    }

    src += ds_factor * src_stride;
    dst += ds_factor * dst_stride;
    proc_ht += ds_factor;
  } while (proc_ht < height);
}

// Fill the lower-triangular elements of the H buffer from its
// upper-triangular elements.
static inline void fill_lower_triag_elements_avx2(const int32_t wiener_win2,
                                                  int64_t *const H) {
  for (int32_t i = 0; i < wiener_win2 - 1; i += 4) {
    __m256i in[4], out[4];

    in[0] = _mm256_loadu_si256((__m256i *)(H + (i + 0) * wiener_win2 + i + 1));
    in[1] = _mm256_loadu_si256((__m256i *)(H + (i + 1) * wiener_win2 + i + 1));
    in[2] = _mm256_loadu_si256((__m256i *)(H + (i + 2) * wiener_win2 + i + 1));
    in[3] = _mm256_loadu_si256((__m256i *)(H + (i + 3) * wiener_win2 + i + 1));

    transpose_64bit_4x4_avx2(in, out);

    _mm_storel_epi64((__m128i *)(H + (i + 1) * wiener_win2 + i),
                     _mm256_castsi256_si128(out[0]));
    _mm_storeu_si128((__m128i *)(H + (i + 2) * wiener_win2 + i),
                     _mm256_castsi256_si128(out[1]));
    _mm256_storeu_si256((__m256i *)(H + (i + 3) * wiener_win2 + i), out[2]);
    _mm256_storeu_si256((__m256i *)(H + (i + 4) * wiener_win2 + i), out[3]);

    for (int32_t j = i + 5; j < wiener_win2; j += 4) {
      in[0] = _mm256_loadu_si256((__m256i *)(H + (i + 0) * wiener_win2 + j));
      in[1] = _mm256_loadu_si256((__m256i *)(H + (i + 1) * wiener_win2 + j));
      in[2] = _mm256_loadu_si256((__m256i *)(H + (i + 2) * wiener_win2 + j));
      in[3] = _mm256_loadu_si256((__m256i *)(H + (i + 3) * wiener_win2 + j));

      transpose_64bit_4x4_avx2(in, out);

      _mm256_storeu_si256((__m256i *)(H + (j + 0) * wiener_win2 + i), out[0]);
      _mm256_storeu_si256((__m256i *)(H + (j + 1) * wiener_win2 + i), out[1]);
      _mm256_storeu_si256((__m256i *)(H + (j + 2) * wiener_win2 + i), out[2]);
      _mm256_storeu_si256((__m256i *)(H + (j + 3) * wiener_win2 + i), out[3]);
    }
  }
}

// Fill H buffer based on loop_count.
#define INIT_H_VALUES(d, loop_count)                           \
  for (int g = 0; g < (loop_count); g++) {                     \
    const __m256i dgd0 =                                       \
        _mm256_loadu_si256((__m256i *)((d) + (g * d_stride))); \
    madd_and_accum_avx2(dgd_mul_df, dgd0, &sum_h[g]);          \
  }

// Fill M & H buffer.
#define INIT_MH_VALUES(d)                                      \
  for (int g = 0; g < wiener_win; g++) {                       \
    const __m256i dgds_0 =                                     \
        _mm256_loadu_si256((__m256i *)((d) + (g * d_stride))); \
    madd_and_accum_avx2(src_mul_df, dgds_0, &sum_m[g]);        \
    madd_and_accum_avx2(dgd_mul_df, dgds_0, &sum_h[g]);        \
  }

// Update the dgd pointers appropriately.
#define INITIALIZATION(wiener_window_sz)                                 \
  j = i / (wiener_window_sz);                                            \
  const int16_t *d_window = d + j;                                       \
  const int16_t *d_current_row =                                         \
      d + j + ((i % (wiener_window_sz)) * d_stride);                     \
  int proc_ht = v_start;                                                 \
  downsample_factor =                                                    \
      use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1; \
  __m256i sum_h[wiener_window_sz];                                       \
  memset(sum_h, 0, sizeof(sum_h));

// Update the downsample factor appropriately.
#define UPDATE_DOWNSAMPLE_FACTOR                              \
  int proc_wd = 0;                                            \
  if (use_downsampled_wiener_stats &&                         \
      ((v_end - proc_ht) < WIENER_STATS_DOWNSAMPLE_FACTOR)) { \
    downsample_factor = v_end - proc_ht;                      \
  }                                                           \
  const __m256i df_reg = _mm256_set1_epi16(downsample_factor);

#define CALCULATE_REMAINING_H_WIN5                                             \
  while (j < wiener_win) {                                                     \
    d_window = d;                                                              \
    d_current_row = d + (i / wiener_win) + ((i % wiener_win) * d_stride);      \
    const __m256i zero = _mm256_setzero_si256();                               \
    sum_h[0] = zero;                                                           \
    sum_h[1] = zero;                                                           \
    sum_h[2] = zero;                                                           \
    sum_h[3] = zero;                                                           \
    sum_h[4] = zero;                                                           \
                                                                               \
    proc_ht = v_start;                                                         \
    downsample_factor =                                                        \
        use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;     \
    do {                                                                       \
      UPDATE_DOWNSAMPLE_FACTOR;                                                \
                                                                               \
      /* Process the amount of width multiple of 16. */                        \
      while (proc_wd < wd_mul16) {                                             \
        const __m256i dgd =                                                    \
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));          \
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);            \
        INIT_H_VALUES(d_window + j + proc_wd, 5)                               \
                                                                               \
        proc_wd += 16;                                                         \
      };                                                                       \
                                                                               \
      /* Process the remaining width here. */                                  \
      if (wd_beyond_mul16) {                                                   \
        const __m256i dgd =                                                    \
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));          \
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);                  \
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);       \
        INIT_H_VALUES(d_window + j + proc_wd, 5)                               \
      }                                                                        \
      proc_ht += downsample_factor;                                            \
      d_window += downsample_factor * d_stride;                                \
      d_current_row += downsample_factor * d_stride;                           \
    } while (proc_ht < v_end);                                                 \
    const __m256i s_h0 =                                                       \
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);     \
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + (wiener_win * j)), \
                        s_h0);                                                 \
    const __m256i s_m_h = convert_and_add_avx2(sum_h[4]);                      \
    const __m128i s_m_h0 = add_64bit_lvl_avx2(s_m_h, s_m_h);                   \
    _mm_storel_epi64(                                                          \
        (__m128i *)(H + (i * wiener_win2) + (wiener_win * j) + 4), s_m_h0);    \
    j++;                                                                       \
  }

#define CALCULATE_REMAINING_H_WIN7                                             \
  while (j < wiener_win) {                                                     \
    d_window = d;                                                              \
    d_current_row = d + (i / wiener_win) + ((i % wiener_win) * d_stride);      \
    const __m256i zero = _mm256_setzero_si256();                               \
    sum_h[0] = zero;                                                           \
    sum_h[1] = zero;                                                           \
    sum_h[2] = zero;                                                           \
    sum_h[3] = zero;                                                           \
    sum_h[4] = zero;                                                           \
    sum_h[5] = zero;                                                           \
    sum_h[6] = zero;                                                           \
                                                                               \
    proc_ht = v_start;                                                         \
    downsample_factor =                                                        \
        use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;     \
    do {                                                                       \
      UPDATE_DOWNSAMPLE_FACTOR;                                                \
                                                                               \
      /* Process the amount of width multiple of 16. */                        \
      while (proc_wd < wd_mul16) {                                             \
        const __m256i dgd =                                                    \
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));          \
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);            \
        INIT_H_VALUES(d_window + j + proc_wd, 7)                               \
                                                                               \
        proc_wd += 16;                                                         \
      };                                                                       \
                                                                               \
      /* Process the remaining width here. */                                  \
      if (wd_beyond_mul16) {                                                   \
        const __m256i dgd =                                                    \
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));          \
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);                  \
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);       \
        INIT_H_VALUES(d_window + j + proc_wd, 7)                               \
      }                                                                        \
      proc_ht += downsample_factor;                                            \
      d_window += downsample_factor * d_stride;                                \
      d_current_row += downsample_factor * d_stride;                           \
    } while (proc_ht < v_end);                                                 \
    const __m256i s_h1 =                                                       \
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);     \
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + (wiener_win * j)), \
                        s_h1);                                                 \
    const __m256i s_h2 =                                                       \
        hadd_four_32_to_64_avx2(sum_h[4], sum_h[5], &sum_h[6], &sum_h[6]);     \
    _mm256_storeu_si256(                                                       \
        (__m256i *)(H + (i * wiener_win2) + (wiener_win * j) + 4), s_h2);      \
    j++;                                                                       \
  }

// The buffers H (auto-covariance) and M (cross-correlation) are used to
// estimate the filter tap values required for wiener filtering. Here, the
// buffer H is of size ((wiener_window_size^2) * (wiener_window_size^2)) and M
// is of size (wiener_window_size * wiener_window_size). H is a symmetric
// matrix where the values above the diagonal (upper triangle) are equal to the
// values below the diagonal (lower triangle). The calculation of the
// elements/stats of H (upper triangle) and M is done in steps as described
// below, where each step fills specific values of H and M.
// Once the upper-triangular elements of the H matrix are derived, they are
// copied to the lower triangle using fill_lower_triag_elements_avx2().
// Example: Wiener window size = WIENER_WIN_CHROMA (5)
// M buffer = [M0 M1 M2 ---- M23 M24]
// H buffer = Hxy (x-row, y-column)
// [H00  H01  H02  ---- H023  H024 ]
// [H10  H11  H12  ---- H123  H124 ]
// [H20  H21  H22  ---- H223  H224 ]
// [H30  H31  H32  ---- H323  H324 ]
// [H40  H41  H42  ---- H423  H424 ]
// [H50  H51  H52  ---- H523  H524 ]
//   ||
//   ||
// [H230 H231 H232 ---- H2323 H2324]
// [H240 H241 H242 ---- H2423 H2424]
// In Step 1, the whole M buffer (i.e., M0 to M24) and the first row of H
// (i.e., H00 to H024) are filled. The remaining rows of the H buffer are
// filled through Steps 2 to 6.
static void compute_stats_win5_avx2(const int16_t *const d, int32_t d_stride,
                                    const int16_t *const s, int32_t s_stride,
                                    int32_t width, int v_start, int v_end,
                                    int64_t *const M, int64_t *const H,
                                    int use_downsampled_wiener_stats) {
  const int32_t wiener_win = WIENER_WIN_CHROMA;
  const int32_t wiener_win2 = wiener_win * wiener_win;
  // Amount of width which is beyond a multiple of 16. This case is handled
  // appropriately to process only the required width towards the end.
  const int32_t wd_mul16 = width & ~15;
  const int32_t wd_beyond_mul16 = width - wd_mul16;
  const __m256i mask =
      _mm256_loadu_si256((__m256i *)(&mask_16bit[16 - wd_beyond_mul16]));
  int downsample_factor;

  // Step 1: The full M (i.e., M0 to M24) and the first row of H (i.e., H00 to
  // H024) are filled here. The loop over 'j' is executed for values 0 to 4
  // (wiener_win - 1). For a specific 'j', 5 values of M and H are filled, as
  // shown below.
  // j=0: M0-M4 and H00-H04, j=1: M5-M9 and H05-H09 are filled, etc.
  int j = 0;
  do {
    const int16_t *s_t = s;
    const int16_t *d_t = d;
    __m256i sum_m[WIENER_WIN_CHROMA] = { _mm256_setzero_si256() };
    __m256i sum_h[WIENER_WIN_CHROMA] = { _mm256_setzero_si256() };
    downsample_factor =
        use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
    int proc_ht = v_start;
    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i src = _mm256_loadu_si256((__m256i *)(s_t + proc_wd));
        const __m256i dgd = _mm256_loadu_si256((__m256i *)(d_t + proc_wd));
        const __m256i src_mul_df = _mm256_mullo_epi16(src, df_reg);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_MH_VALUES(d_t + j + proc_wd)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i src = _mm256_loadu_si256((__m256i *)(s_t + proc_wd));
        const __m256i dgd = _mm256_loadu_si256((__m256i *)(d_t + proc_wd));
        const __m256i src_mask = _mm256_and_si256(src, mask);
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i src_mul_df = _mm256_mullo_epi16(src_mask, df_reg);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_MH_VALUES(d_t + j + proc_wd)
      }
      proc_ht += downsample_factor;
      s_t += downsample_factor * s_stride;
      d_t += downsample_factor * d_stride;
    } while (proc_ht < v_end);

    const __m256i s_m =
        hadd_four_32_to_64_avx2(sum_m[0], sum_m[1], &sum_m[2], &sum_m[3]);
    const __m128i s_m_h = convert_32_to_64_add_avx2(sum_m[4], sum_h[4]);
    _mm256_storeu_si256((__m256i *)(M + wiener_win * j), s_m);
    _mm_storel_epi64((__m128i *)&M[wiener_win * j + 4], s_m_h);

    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + wiener_win * j), s_h);
    _mm_storeh_epi64((__m128i *)&H[wiener_win * j + 4], s_m_h);
  } while (++j < wiener_win);

  // The steps below are designed to fill the remaining rows of the H buffer.
  // The aim is to fill only the upper-triangle elements corresponding to each
  // row; the lower-triangle elements are copied from the upper triangle. Also,
  // as mentioned in Step 1, the core function is designed to fill 5
  // elements/stats/values of the H buffer.
  //
  // Step 2: Here, rows 1, 6, 11, 16 and 21 are filled. As only upper-triangle
  // elements are needed, H10 from row1, H60-H64 and H65 from row6, etc. need
  // not be filled. As the core function processes 5 values, in the first
  // iteration of 'j' only 4 values are filled, i.e., H11-H14 from row1,
  // H66-H69 from row6, etc.
  for (int i = 1; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th
    // iteration from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN_CHROMA)

    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (1 * d_stride), 4)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (1 * d_stride), 4)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);

    // Process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN5
  }

  // Step 3: Here, rows 2, 7, 12, 17 and 22 are filled. As only upper-triangle
  // elements are needed, H20-H21 from row2, H70-H74 and H75-H76 from row7,
  // etc. need not be filled. As the core function processes 5 values, in the
  // first iteration of 'j' only 3 values are filled, i.e., H22-H24 from row2,
  // H77-H79 from row7, etc.
  for (int i = 2; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th
    // iteration from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN_CHROMA)

    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (2 * d_stride), 3)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (2 * d_stride), 3)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);

    // Process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN5
  }

  // Step 4: Here, rows 3, 8, 13, 18 and 23 are filled. As only upper-triangle
  // elements are needed, H30-H32 from row3, H80-H84 and H85-H87 from row8,
  // etc. need not be filled. As the core function processes 5 values, in the
  // first iteration of 'j' only 2 values are filled, i.e., H33-H34 from row3,
  // H88-H89 from row8, etc.
  for (int i = 3; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th
    // iteration from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN_CHROMA)

    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (3 * d_stride), 2)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (3 * d_stride), 2)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m128i s_h = convert_32_to_64_add_avx2(sum_h[0], sum_h[1]);
    _mm_storeu_si128((__m128i *)(H + (i * wiener_win2) + i), s_h);

    // Process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN5
  }

  // Step 5: Here, rows 4, 9, 14, 19 and 24 are filled. As only upper-triangle
  // elements are needed, H40-H43 from row4, H90-H94 and H95-H98 from row9,
  // etc. need not be filled. As the core function processes 5 values, in the
  // first iteration of 'j' only 1 value is filled, i.e., H44 from row4,
  // H99 from row9, etc.
  for (int i = 4; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th
    // iteration from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN_CHROMA)
    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (4 * d_stride), 1)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (4 * d_stride), 1)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m128i s_h = convert_32_to_64_add_avx2(sum_h[0], sum_h[1]);
    _mm_storeu_si128((__m128i *)(H + (i * wiener_win2) + i), s_h);

    // Process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN5
  }

  // Step 6: Here, rows 5, 10, 15 and 20 are filled. As only upper-triangle
  // elements are needed, H50-H54 from row5, H100-H104 and H105-H109 from
  // row10, etc. need not be filled. The first iteration of 'j' fills H55-H59
  // from row5, H1010-H1014 from row10, etc.
  for (int i = 5; i < wiener_win2; i += wiener_win) {
    // Derive the 'j'th iteration from where the H buffer filling needs to be
    // started.
    j = i / wiener_win;
    int shift = 0;
    do {
      // Update the dgd pointers appropriately.
      int proc_ht = v_start;
      const int16_t *d_window = d + (i / wiener_win);
      const int16_t *d_current_row =
          d + (i / wiener_win) + ((i % wiener_win) * d_stride);
      downsample_factor =
          use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
      __m256i sum_h[WIENER_WIN_CHROMA] = { _mm256_setzero_si256() };
      do {
        UPDATE_DOWNSAMPLE_FACTOR

        // Process the amount of width multiple of 16.
        while (proc_wd < wd_mul16) {
          const __m256i dgd =
              _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
          const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
          INIT_H_VALUES(d_window + shift + proc_wd, 5)

          proc_wd += 16;
        }

        // Process the remaining width here.
        if (wd_beyond_mul16) {
          const __m256i dgd =
              _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
          const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
          const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
          INIT_H_VALUES(d_window + shift + proc_wd, 5)
        }
        proc_ht += downsample_factor;
        d_window += downsample_factor * d_stride;
        d_current_row += downsample_factor * d_stride;
      } while (proc_ht < v_end);

      const __m256i s_h =
          hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
      _mm256_storeu_si256(
          (__m256i *)(H + (i * wiener_win2) + (wiener_win * j)), s_h);
      const __m256i s_m_h = convert_and_add_avx2(sum_h[4]);
      const __m128i s_m_h0 = add_64bit_lvl_avx2(s_m_h, s_m_h);
      _mm_storel_epi64(
          (__m128i *)(H + (i * wiener_win2) + (wiener_win * j) + 4), s_m_h0);
      shift++;
    } while (++j < wiener_win);
  }

  fill_lower_triag_elements_avx2(wiener_win2, H);
}

// The buffers H (auto-covariance) and M (cross-correlation) are used to
// estimate the filter tap values required for wiener filtering. Here, the
// buffer H is of size ((wiener_window_size^2) * (wiener_window_size^2)) and M
// is of size (wiener_window_size * wiener_window_size). H is a symmetric
// matrix where the values above the diagonal (upper triangle) are equal to the
// values below the diagonal (lower triangle). The calculation of the
// elements/stats of H (upper triangle) and M is done in steps as described
// below, where each step fills specific values of H and M.
// Example:
// Wiener window size = WIENER_WIN (7)
// M buffer = [M0 M1 M2 ---- M47 M48]
// H buffer = Hxy (x-row, y-column)
// [H00  H01  H02  ---- H047  H048 ]
// [H10  H11  H12  ---- H147  H148 ]
// [H20  H21  H22  ---- H247  H248 ]
// [H30  H31  H32  ---- H347  H348 ]
// [H40  H41  H42  ---- H447  H448 ]
// [H50  H51  H52  ---- H547  H548 ]
//   ||
//   ||
// [H470 H471 H472 ---- H4747 H4748]
// [H480 H481 H482 ---- H4847 H4848]
// In Step 1, the whole M buffer (i.e., M0 to M48) and the first row of H
// (i.e., H00 to H048) are filled. The remaining rows of the H buffer are
// filled through Steps 2 to 8.
static void compute_stats_win7_avx2(const int16_t *const d, int32_t d_stride,
                                    const int16_t *const s, int32_t s_stride,
                                    int32_t width, int v_start, int v_end,
                                    int64_t *const M, int64_t *const H,
                                    int use_downsampled_wiener_stats) {
  const int32_t wiener_win = WIENER_WIN;
  const int32_t wiener_win2 = wiener_win * wiener_win;
  // Amount of width which is beyond a multiple of 16. This case is handled
  // appropriately to process only the required width towards the end.
  const int32_t wd_mul16 = width & ~15;
  const int32_t wd_beyond_mul16 = width - wd_mul16;
  const __m256i mask =
      _mm256_loadu_si256((__m256i *)(&mask_16bit[16 - wd_beyond_mul16]));
  int downsample_factor;

  // Step 1: The full M (i.e., M0 to M48) and the first row of H (i.e., H00 to
  // H048) are filled here. The loop over 'j' is executed for values 0 to 6.
  // For a specific 'j', 7 values of M and H are filled, as shown below.
  // j=0: M0-M6 and H00-H06, j=1: M7-M13 and H07-H013 are filled, etc.
  int j = 0;
  do {
    const int16_t *s_t = s;
    const int16_t *d_t = d;
    __m256i sum_m[WIENER_WIN] = { _mm256_setzero_si256() };
    __m256i sum_h[WIENER_WIN] = { _mm256_setzero_si256() };
    downsample_factor =
        use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
    int proc_ht = v_start;
    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i src = _mm256_loadu_si256((__m256i *)(s_t + proc_wd));
        const __m256i dgd = _mm256_loadu_si256((__m256i *)(d_t + proc_wd));
        const __m256i src_mul_df = _mm256_mullo_epi16(src, df_reg);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_MH_VALUES(d_t + j + proc_wd)

        proc_wd += 16;
      }

      if (wd_beyond_mul16) {
        const __m256i src = _mm256_loadu_si256((__m256i *)(s_t + proc_wd));
        const __m256i dgd = _mm256_loadu_si256((__m256i *)(d_t + proc_wd));
        const __m256i src_mask = _mm256_and_si256(src, mask);
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i src_mul_df = _mm256_mullo_epi16(src_mask, df_reg);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_MH_VALUES(d_t + j + proc_wd)
      }
      proc_ht += downsample_factor;
      s_t += downsample_factor * s_stride;
      d_t += downsample_factor * d_stride;
    } while (proc_ht < v_end);

    const __m256i s_m0 =
        hadd_four_32_to_64_avx2(sum_m[0], sum_m[1], &sum_m[2], &sum_m[3]);
    const __m256i s_m1 =
        hadd_four_32_to_64_avx2(sum_m[4], sum_m[5], &sum_m[6], &sum_m[6]);
    _mm256_storeu_si256((__m256i *)(M + wiener_win * j + 0), s_m0);
    _mm_storeu_si128((__m128i *)(M + wiener_win * j + 4),
                     _mm256_castsi256_si128(s_m1));
    _mm_storel_epi64((__m128i *)&M[wiener_win * j + 6],
                     _mm256_extracti128_si256(s_m1, 1));

    const __m256i sh_0 =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    const __m256i sh_1 =
        hadd_four_32_to_64_avx2(sum_h[4], sum_h[5], &sum_h[6], &sum_h[6]);
    _mm256_storeu_si256((__m256i *)(H + wiener_win * j + 0), sh_0);
    _mm_storeu_si128((__m128i *)(H + wiener_win * j + 4),
                     _mm256_castsi256_si128(sh_1));
    _mm_storel_epi64((__m128i *)&H[wiener_win * j + 6],
                     _mm256_extracti128_si256(sh_1, 1));
  } while (++j < wiener_win);

  // The steps below are designed to fill the remaining rows of the H buffer.
  // The aim is to fill only the upper-triangle elements corresponding to each
  // row; the lower-triangle elements are copied from the upper triangle. Also,
  // as mentioned in Step 1, the core function is designed to fill 7
  // elements/stats/values of the H buffer.
  //
  // Step 2: Here, rows 1, 8, 15, 22, 29, 36 and 43 are filled. As only
  // upper-triangle elements are needed, H10 from row1, H80-H86 and H87 from
  // row8, etc. need not be filled. As the core function processes 7 values, in
  // the first iteration of 'j' only 6 values are filled, i.e., H11-H16 from
  // row1 and H88-H813 from row8, etc.
  for (int i = 1; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th
    // iteration from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN)

    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (1 * d_stride), 6)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (1 * d_stride), 6)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);
    const __m128i s_h0 = convert_32_to_64_add_avx2(sum_h[4], sum_h[5]);
    _mm_storeu_si128((__m128i *)(H + (i * wiener_win2) + i + 4), s_h0);

    // Process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN7
  }

  // Step 3: Here, rows 2, 9, 16, 23, 30, 37 and 44 are filled. As only
  // upper-triangle elements are needed, H20-H21 from row2, H90-H96 and
  // H97-H98 from row9, etc. need not be filled. As the core function processes
  // 7 values, in the first iteration of 'j' only 5 values are filled, i.e.,
  // H22-H26 from row2 and H99-H913 from row9, etc.
  for (int i = 2; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th
    // iteration from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN)
    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (2 * d_stride), 5)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (2 * d_stride), 5)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);
    const __m256i s_m_h = convert_and_add_avx2(sum_h[4]);
    const __m128i s_m_h0 = add_64bit_lvl_avx2(s_m_h, s_m_h);
    _mm_storel_epi64((__m128i *)(H + (i * wiener_win2) + i + 4), s_m_h0);

    // Process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN7
  }

  // Step 4: Here, rows 3, 10, 17, 24, 31, 38 and 45 are filled. As only
  // upper-triangle elements are needed, H30-H32 from row3, H100-H106 and
  // H107-H109 from row10, etc. need not be filled.
  // As the core function processes 7 values, in the first iteration of 'j'
  // only 4 values are filled, i.e., H33-H36 from row3 and H1010-H1013 from
  // row10, etc.
  for (int i = 3; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th
    // iteration from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN)

    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (3 * d_stride), 4)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (3 * d_stride), 4)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);

    // Process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN7
  }

  // Step 5: Here, rows 4, 11, 18, 25, 32, 39 and 46 are filled. As only
  // upper-triangle elements are needed, H40-H43 from row4, H110-H116 and
  // H117-H1110 from row11, etc. need not be filled. As the core function
  // processes 7 values, in the first iteration of 'j' only 3 values are
  // filled, i.e., H44-H46 from row4 and H1111-H1113 from row11, etc.
  for (int i = 4; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th
    // iteration from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN)

    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (4 * d_stride), 3)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (4 * d_stride), 3)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);

    // Process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN7
  }

  // Step 6: Here, rows 5, 12, 19, 26, 33, 40 and 47 are filled. As only
  // upper-triangle elements are needed, H50-H54 from row5, H120-H126 and
  // H127-H1211 from row12, etc. need not be filled. As the core function
  // processes 7 values, in the first iteration of 'j' only 2 values are
  // filled, i.e., H55-H56 from row5 and H1212-H1213 from row12, etc.
  for (int i = 5; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th
    // iteration from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN)
    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (5 * d_stride), 2)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (5 * d_stride), 2)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);

    // Process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN7
  }

  // Step 7: Here, rows 6, 13, 20, 27, 34, 41 and 48 are filled. As only
  // upper-triangle elements are needed, H60-H65 from row6, H130-H136 and
  // H137-H1312 from row13, etc. need not be filled. As the core function
  // processes 7 values, in the first iteration of 'j' only 1 value is filled,
  // i.e., H66 from row6 and H1313 from row13, etc.
  for (int i = 6; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th
    // iteration from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN)
    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (6 * d_stride), 1)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (6 * d_stride), 1)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    xx_storel_64(&H[(i * wiener_win2) + i], _mm256_castsi256_si128(s_h));

    // Process the remaining 'j' iterations.

  // Step 8: Here, the rows 7, 14, 21, 28, 35 and 42 are filled. As we need
  // to fill only upper-triangle elements, H70-H76 from row7, H140-H146 and
  // H147-H1413 from row14, etc. need not be filled. The first iteration of
  // 'j' fills H77-H713 from row7 and H1414-H1420 from row14, etc.
  for (int i = 7; i < wiener_win2; i += wiener_win) {
    // Derive the 'j'th iteration from where the H buffer filling needs to be
    // started.
    j = i / wiener_win;
    int shift = 0;
    do {
      // Update the dgd pointers appropriately.
      int proc_ht = v_start;
      const int16_t *d_window = d + (i / WIENER_WIN);
      const int16_t *d_current_row =
          d + (i / WIENER_WIN) + ((i % WIENER_WIN) * d_stride);
      downsample_factor =
          use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
      __m256i sum_h[WIENER_WIN] = { _mm256_setzero_si256() };
      do {
        UPDATE_DOWNSAMPLE_FACTOR

        // Process the amount of width multiple of 16.
        while (proc_wd < wd_mul16) {
          const __m256i dgd =
              _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
          const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
          INIT_H_VALUES(d_window + shift + proc_wd, 7)

          proc_wd += 16;
        }

        // Process the remaining width here.
        if (wd_beyond_mul16) {
          const __m256i dgd =
              _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
          const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
          const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
          INIT_H_VALUES(d_window + shift + proc_wd, 7)
        }
        proc_ht += downsample_factor;
        d_window += downsample_factor * d_stride;
        d_current_row += downsample_factor * d_stride;
      } while (proc_ht < v_end);

      const __m256i sh_0 =
          hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
      const __m256i sh_1 =
          hadd_four_32_to_64_avx2(sum_h[4], sum_h[5], &sum_h[6], &sum_h[6]);
      _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + (wiener_win * j)),
                          sh_0);
      _mm_storeu_si128(
          (__m128i *)(H + (i * wiener_win2) + (wiener_win * j) + 4),
          _mm256_castsi256_si128(sh_1));
      _mm_storel_epi64((__m128i *)&H[(i * wiener_win2) + (wiener_win * j) + 6],
                       _mm256_extracti128_si256(sh_1, 1));
      shift++;
    } while (++j < wiener_win);
  }

  fill_lower_triag_elements_avx2(wiener_win2, H);
}

void av1_compute_stats_avx2(int wiener_win, const uint8_t *dgd,
                            const uint8_t *src, int16_t *dgd_avg,
                            int16_t *src_avg, int h_start, int h_end,
                            int v_start, int v_end, int dgd_stride,
                            int src_stride, int64_t *M, int64_t *H,
                            int use_downsampled_wiener_stats) {
  if (wiener_win != WIENER_WIN && wiener_win != WIENER_WIN_CHROMA) {
    // Currently, libaom supports Wiener filter processing only for the window
    // sizes WIENER_WIN_CHROMA (5) and WIENER_WIN (7). For any other window
    // size there is no SIMD support, so fall back to the C function.
1509 av1_compute_stats_c(wiener_win, dgd, src, dgd_avg, src_avg, h_start, h_end, 1510 v_start, v_end, dgd_stride, src_stride, M, H, 1511 use_downsampled_wiener_stats); 1512 return; 1513 } 1514 1515 const int32_t wiener_halfwin = wiener_win >> 1; 1516 const uint8_t avg = 1517 calc_dgd_buf_avg_avx2(dgd, h_start, h_end, v_start, v_end, dgd_stride); 1518 const int32_t width = h_end - h_start; 1519 const int32_t height = v_end - v_start; 1520 const int32_t d_stride = (width + 2 * wiener_halfwin + 15) & ~15; 1521 const int32_t s_stride = (width + 15) & ~15; 1522 1523 // Based on the sf 'use_downsampled_wiener_stats', process either once for 1524 // UPDATE_DOWNSAMPLE_FACTOR or for each row. 1525 sub_avg_block_avx2(src + v_start * src_stride + h_start, src_stride, avg, 1526 width, height, src_avg, s_stride, 1527 use_downsampled_wiener_stats); 1528 1529 // Compute (dgd-avg) buffer here which is used to fill H buffer. 1530 sub_avg_block_avx2( 1531 dgd + (v_start - wiener_halfwin) * dgd_stride + h_start - wiener_halfwin, 1532 dgd_stride, avg, width + 2 * wiener_halfwin, height + 2 * wiener_halfwin, 1533 dgd_avg, d_stride, 0); 1534 if (wiener_win == WIENER_WIN) { 1535 compute_stats_win7_avx2(dgd_avg, d_stride, src_avg, s_stride, width, 1536 v_start, v_end, M, H, use_downsampled_wiener_stats); 1537 } else if (wiener_win == WIENER_WIN_CHROMA) { 1538 compute_stats_win5_avx2(dgd_avg, d_stride, src_avg, s_stride, width, 1539 v_start, v_end, M, H, use_downsampled_wiener_stats); 1540 } 1541 } 1542 1543 static inline __m256i pair_set_epi16(int a, int b) { 1544 return _mm256_set1_epi32( 1545 (int32_t)(((uint16_t)(a)) | (((uint32_t)(uint16_t)(b)) << 16))); 1546 } 1547 1548 int64_t av1_lowbd_pixel_proj_error_avx2( 1549 const uint8_t *src8, int width, int height, int src_stride, 1550 const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride, 1551 int32_t *flt1, int flt1_stride, int xq[2], const sgr_params_type *params) { 1552 int i, j, k; 1553 const int32_t shift = SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS; 1554 const __m256i rounding = _mm256_set1_epi32(1 << (shift - 1)); 1555 __m256i sum64 = _mm256_setzero_si256(); 1556 const uint8_t *src = src8; 1557 const uint8_t *dat = dat8; 1558 int64_t err = 0; 1559 if (params->r[0] > 0 && params->r[1] > 0) { 1560 __m256i xq_coeff = pair_set_epi16(xq[0], xq[1]); 1561 for (i = 0; i < height; ++i) { 1562 __m256i sum32 = _mm256_setzero_si256(); 1563 for (j = 0; j <= width - 16; j += 16) { 1564 const __m256i d0 = _mm256_cvtepu8_epi16(xx_loadu_128(dat + j)); 1565 const __m256i s0 = _mm256_cvtepu8_epi16(xx_loadu_128(src + j)); 1566 const __m256i flt0_16b = _mm256_permute4x64_epi64( 1567 _mm256_packs_epi32(yy_loadu_256(flt0 + j), 1568 yy_loadu_256(flt0 + j + 8)), 1569 0xd8); 1570 const __m256i flt1_16b = _mm256_permute4x64_epi64( 1571 _mm256_packs_epi32(yy_loadu_256(flt1 + j), 1572 yy_loadu_256(flt1 + j + 8)), 1573 0xd8); 1574 const __m256i u0 = _mm256_slli_epi16(d0, SGRPROJ_RST_BITS); 1575 const __m256i flt0_0_sub_u = _mm256_sub_epi16(flt0_16b, u0); 1576 const __m256i flt1_0_sub_u = _mm256_sub_epi16(flt1_16b, u0); 1577 const __m256i v0 = _mm256_madd_epi16( 1578 xq_coeff, _mm256_unpacklo_epi16(flt0_0_sub_u, flt1_0_sub_u)); 1579 const __m256i v1 = _mm256_madd_epi16( 1580 xq_coeff, _mm256_unpackhi_epi16(flt0_0_sub_u, flt1_0_sub_u)); 1581 const __m256i vr0 = 1582 _mm256_srai_epi32(_mm256_add_epi32(v0, rounding), shift); 1583 const __m256i vr1 = 1584 _mm256_srai_epi32(_mm256_add_epi32(v1, rounding), shift); 1585 const __m256i e0 = _mm256_sub_epi16( 1586 
_mm256_add_epi16(_mm256_packs_epi32(vr0, vr1), d0), s0); 1587 const __m256i err0 = _mm256_madd_epi16(e0, e0); 1588 sum32 = _mm256_add_epi32(sum32, err0); 1589 } 1590 for (k = j; k < width; ++k) { 1591 const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS); 1592 int32_t v = xq[0] * (flt0[k] - u) + xq[1] * (flt1[k] - u); 1593 const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k]; 1594 err += ((int64_t)e * e); 1595 } 1596 dat += dat_stride; 1597 src += src_stride; 1598 flt0 += flt0_stride; 1599 flt1 += flt1_stride; 1600 const __m256i sum64_0 = 1601 _mm256_cvtepi32_epi64(_mm256_castsi256_si128(sum32)); 1602 const __m256i sum64_1 = 1603 _mm256_cvtepi32_epi64(_mm256_extracti128_si256(sum32, 1)); 1604 sum64 = _mm256_add_epi64(sum64, sum64_0); 1605 sum64 = _mm256_add_epi64(sum64, sum64_1); 1606 } 1607 } else if (params->r[0] > 0 || params->r[1] > 0) { 1608 const int xq_active = (params->r[0] > 0) ? xq[0] : xq[1]; 1609 const __m256i xq_coeff = 1610 pair_set_epi16(xq_active, -xq_active * (1 << SGRPROJ_RST_BITS)); 1611 const int32_t *flt = (params->r[0] > 0) ? flt0 : flt1; 1612 const int flt_stride = (params->r[0] > 0) ? flt0_stride : flt1_stride; 1613 for (i = 0; i < height; ++i) { 1614 __m256i sum32 = _mm256_setzero_si256(); 1615 for (j = 0; j <= width - 16; j += 16) { 1616 const __m256i d0 = _mm256_cvtepu8_epi16(xx_loadu_128(dat + j)); 1617 const __m256i s0 = _mm256_cvtepu8_epi16(xx_loadu_128(src + j)); 1618 const __m256i flt_16b = _mm256_permute4x64_epi64( 1619 _mm256_packs_epi32(yy_loadu_256(flt + j), 1620 yy_loadu_256(flt + j + 8)), 1621 0xd8); 1622 const __m256i v0 = 1623 _mm256_madd_epi16(xq_coeff, _mm256_unpacklo_epi16(flt_16b, d0)); 1624 const __m256i v1 = 1625 _mm256_madd_epi16(xq_coeff, _mm256_unpackhi_epi16(flt_16b, d0)); 1626 const __m256i vr0 = 1627 _mm256_srai_epi32(_mm256_add_epi32(v0, rounding), shift); 1628 const __m256i vr1 = 1629 _mm256_srai_epi32(_mm256_add_epi32(v1, rounding), shift); 1630 const __m256i e0 = _mm256_sub_epi16( 1631 _mm256_add_epi16(_mm256_packs_epi32(vr0, vr1), d0), s0); 1632 const __m256i err0 = _mm256_madd_epi16(e0, e0); 1633 sum32 = _mm256_add_epi32(sum32, err0); 1634 } 1635 for (k = j; k < width; ++k) { 1636 const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS); 1637 int32_t v = xq_active * (flt[k] - u); 1638 const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k]; 1639 err += ((int64_t)e * e); 1640 } 1641 dat += dat_stride; 1642 src += src_stride; 1643 flt += flt_stride; 1644 const __m256i sum64_0 = 1645 _mm256_cvtepi32_epi64(_mm256_castsi256_si128(sum32)); 1646 const __m256i sum64_1 = 1647 _mm256_cvtepi32_epi64(_mm256_extracti128_si256(sum32, 1)); 1648 sum64 = _mm256_add_epi64(sum64, sum64_0); 1649 sum64 = _mm256_add_epi64(sum64, sum64_1); 1650 } 1651 } else { 1652 __m256i sum32 = _mm256_setzero_si256(); 1653 for (i = 0; i < height; ++i) { 1654 for (j = 0; j <= width - 16; j += 16) { 1655 const __m256i d0 = _mm256_cvtepu8_epi16(xx_loadu_128(dat + j)); 1656 const __m256i s0 = _mm256_cvtepu8_epi16(xx_loadu_128(src + j)); 1657 const __m256i diff0 = _mm256_sub_epi16(d0, s0); 1658 const __m256i err0 = _mm256_madd_epi16(diff0, diff0); 1659 sum32 = _mm256_add_epi32(sum32, err0); 1660 } 1661 for (k = j; k < width; ++k) { 1662 const int32_t e = (int32_t)(dat[k]) - src[k]; 1663 err += ((int64_t)e * e); 1664 } 1665 dat += dat_stride; 1666 src += src_stride; 1667 } 1668 const __m256i sum64_0 = 1669 _mm256_cvtepi32_epi64(_mm256_castsi256_si128(sum32)); 1670 const __m256i sum64_1 = 1671 _mm256_cvtepi32_epi64(_mm256_extracti128_si256(sum32, 1)); 
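    // Unlike the filtered branches above, sum32 here was accumulated over the
    // whole block rather than per row, so the widened halves are combined only
    // once before the final horizontal reduction below.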
1672 sum64 = _mm256_add_epi64(sum64_0, sum64_1); 1673 } 1674 int64_t sum[4]; 1675 yy_storeu_256(sum, sum64); 1676 err += sum[0] + sum[1] + sum[2] + sum[3]; 1677 return err; 1678 } 1679 1680 // When params->r[0] > 0 and params->r[1] > 0. In this case all elements of 1681 // C and H need to be computed. 1682 static inline void calc_proj_params_r0_r1_avx2( 1683 const uint8_t *src8, int width, int height, int src_stride, 1684 const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride, 1685 int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) { 1686 const int size = width * height; 1687 const uint8_t *src = src8; 1688 const uint8_t *dat = dat8; 1689 __m256i h00, h01, h11, c0, c1; 1690 const __m256i zero = _mm256_setzero_si256(); 1691 h01 = h11 = c0 = c1 = h00 = zero; 1692 1693 for (int i = 0; i < height; ++i) { 1694 for (int j = 0; j < width; j += 8) { 1695 const __m256i u_load = _mm256_cvtepu8_epi32( 1696 _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j))); 1697 const __m256i s_load = _mm256_cvtepu8_epi32( 1698 _mm_loadl_epi64((__m128i *)(src + i * src_stride + j))); 1699 __m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j)); 1700 __m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j)); 1701 __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS); 1702 __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS); 1703 s = _mm256_sub_epi32(s, d); 1704 f1 = _mm256_sub_epi32(f1, d); 1705 f2 = _mm256_sub_epi32(f2, d); 1706 1707 const __m256i h00_even = _mm256_mul_epi32(f1, f1); 1708 const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), 1709 _mm256_srli_epi64(f1, 32)); 1710 h00 = _mm256_add_epi64(h00, h00_even); 1711 h00 = _mm256_add_epi64(h00, h00_odd); 1712 1713 const __m256i h01_even = _mm256_mul_epi32(f1, f2); 1714 const __m256i h01_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), 1715 _mm256_srli_epi64(f2, 32)); 1716 h01 = _mm256_add_epi64(h01, h01_even); 1717 h01 = _mm256_add_epi64(h01, h01_odd); 1718 1719 const __m256i h11_even = _mm256_mul_epi32(f2, f2); 1720 const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), 1721 _mm256_srli_epi64(f2, 32)); 1722 h11 = _mm256_add_epi64(h11, h11_even); 1723 h11 = _mm256_add_epi64(h11, h11_odd); 1724 1725 const __m256i c0_even = _mm256_mul_epi32(f1, s); 1726 const __m256i c0_odd = 1727 _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32)); 1728 c0 = _mm256_add_epi64(c0, c0_even); 1729 c0 = _mm256_add_epi64(c0, c0_odd); 1730 1731 const __m256i c1_even = _mm256_mul_epi32(f2, s); 1732 const __m256i c1_odd = 1733 _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32)); 1734 c1 = _mm256_add_epi64(c1, c1_even); 1735 c1 = _mm256_add_epi64(c1, c1_odd); 1736 } 1737 } 1738 1739 __m256i c_low = _mm256_unpacklo_epi64(c0, c1); 1740 const __m256i c_high = _mm256_unpackhi_epi64(c0, c1); 1741 c_low = _mm256_add_epi64(c_low, c_high); 1742 const __m128i c_128bit = _mm_add_epi64(_mm256_extracti128_si256(c_low, 1), 1743 _mm256_castsi256_si128(c_low)); 1744 1745 __m256i h0x_low = _mm256_unpacklo_epi64(h00, h01); 1746 const __m256i h0x_high = _mm256_unpackhi_epi64(h00, h01); 1747 h0x_low = _mm256_add_epi64(h0x_low, h0x_high); 1748 const __m128i h0x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h0x_low, 1), 1749 _mm256_castsi256_si128(h0x_low)); 1750 1751 // Using the symmetric properties of H, calculations of H[1][0] are not 1752 // needed. 
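  // The zero register keeps the H[1][0] slot of the packed store at zero; it
  // is filled in after the divisions below using H[1][0] = H[0][1].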
1753 __m256i h1x_low = _mm256_unpacklo_epi64(zero, h11); 1754 const __m256i h1x_high = _mm256_unpackhi_epi64(zero, h11); 1755 h1x_low = _mm256_add_epi64(h1x_low, h1x_high); 1756 const __m128i h1x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h1x_low, 1), 1757 _mm256_castsi256_si128(h1x_low)); 1758 1759 xx_storeu_128(C, c_128bit); 1760 xx_storeu_128(H[0], h0x_128bit); 1761 xx_storeu_128(H[1], h1x_128bit); 1762 1763 H[0][0] /= size; 1764 H[0][1] /= size; 1765 H[1][1] /= size; 1766 1767 // Since H is a symmetric matrix 1768 H[1][0] = H[0][1]; 1769 C[0] /= size; 1770 C[1] /= size; 1771 } 1772 1773 // When only params->r[0] > 0. In this case only H[0][0] and C[0] are 1774 // non-zero and need to be computed. 1775 static inline void calc_proj_params_r0_avx2(const uint8_t *src8, int width, 1776 int height, int src_stride, 1777 const uint8_t *dat8, int dat_stride, 1778 int32_t *flt0, int flt0_stride, 1779 int64_t H[2][2], int64_t C[2]) { 1780 const int size = width * height; 1781 const uint8_t *src = src8; 1782 const uint8_t *dat = dat8; 1783 __m256i h00, c0; 1784 const __m256i zero = _mm256_setzero_si256(); 1785 c0 = h00 = zero; 1786 1787 for (int i = 0; i < height; ++i) { 1788 for (int j = 0; j < width; j += 8) { 1789 const __m256i u_load = _mm256_cvtepu8_epi32( 1790 _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j))); 1791 const __m256i s_load = _mm256_cvtepu8_epi32( 1792 _mm_loadl_epi64((__m128i *)(src + i * src_stride + j))); 1793 __m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j)); 1794 __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS); 1795 __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS); 1796 s = _mm256_sub_epi32(s, d); 1797 f1 = _mm256_sub_epi32(f1, d); 1798 1799 const __m256i h00_even = _mm256_mul_epi32(f1, f1); 1800 const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), 1801 _mm256_srli_epi64(f1, 32)); 1802 h00 = _mm256_add_epi64(h00, h00_even); 1803 h00 = _mm256_add_epi64(h00, h00_odd); 1804 1805 const __m256i c0_even = _mm256_mul_epi32(f1, s); 1806 const __m256i c0_odd = 1807 _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32)); 1808 c0 = _mm256_add_epi64(c0, c0_even); 1809 c0 = _mm256_add_epi64(c0, c0_odd); 1810 } 1811 } 1812 const __m128i h00_128bit = _mm_add_epi64(_mm256_extracti128_si256(h00, 1), 1813 _mm256_castsi256_si128(h00)); 1814 const __m128i h00_val = 1815 _mm_add_epi64(h00_128bit, _mm_srli_si128(h00_128bit, 8)); 1816 1817 const __m128i c0_128bit = _mm_add_epi64(_mm256_extracti128_si256(c0, 1), 1818 _mm256_castsi256_si128(c0)); 1819 const __m128i c0_val = _mm_add_epi64(c0_128bit, _mm_srli_si128(c0_128bit, 8)); 1820 1821 const __m128i c = _mm_unpacklo_epi64(c0_val, _mm256_castsi256_si128(zero)); 1822 const __m128i h0x = _mm_unpacklo_epi64(h00_val, _mm256_castsi256_si128(zero)); 1823 1824 xx_storeu_128(C, c); 1825 xx_storeu_128(H[0], h0x); 1826 1827 H[0][0] /= size; 1828 C[0] /= size; 1829 } 1830 1831 // When only params->r[1] > 0. In this case only H[1][1] and C[1] are 1832 // non-zero and need to be computed. 
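// The routine below mirrors calc_proj_params_r0_avx2 with flt1 in place of
// flt0: the accumulated sums land in H[1][1] and C[1], while C[0] and H[1][0]
// are written as zero.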
1833 static inline void calc_proj_params_r1_avx2(const uint8_t *src8, int width, 1834 int height, int src_stride, 1835 const uint8_t *dat8, int dat_stride, 1836 int32_t *flt1, int flt1_stride, 1837 int64_t H[2][2], int64_t C[2]) { 1838 const int size = width * height; 1839 const uint8_t *src = src8; 1840 const uint8_t *dat = dat8; 1841 __m256i h11, c1; 1842 const __m256i zero = _mm256_setzero_si256(); 1843 c1 = h11 = zero; 1844 1845 for (int i = 0; i < height; ++i) { 1846 for (int j = 0; j < width; j += 8) { 1847 const __m256i u_load = _mm256_cvtepu8_epi32( 1848 _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j))); 1849 const __m256i s_load = _mm256_cvtepu8_epi32( 1850 _mm_loadl_epi64((__m128i *)(src + i * src_stride + j))); 1851 __m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j)); 1852 __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS); 1853 __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS); 1854 s = _mm256_sub_epi32(s, d); 1855 f2 = _mm256_sub_epi32(f2, d); 1856 1857 const __m256i h11_even = _mm256_mul_epi32(f2, f2); 1858 const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), 1859 _mm256_srli_epi64(f2, 32)); 1860 h11 = _mm256_add_epi64(h11, h11_even); 1861 h11 = _mm256_add_epi64(h11, h11_odd); 1862 1863 const __m256i c1_even = _mm256_mul_epi32(f2, s); 1864 const __m256i c1_odd = 1865 _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32)); 1866 c1 = _mm256_add_epi64(c1, c1_even); 1867 c1 = _mm256_add_epi64(c1, c1_odd); 1868 } 1869 } 1870 1871 const __m128i h11_128bit = _mm_add_epi64(_mm256_extracti128_si256(h11, 1), 1872 _mm256_castsi256_si128(h11)); 1873 const __m128i h11_val = 1874 _mm_add_epi64(h11_128bit, _mm_srli_si128(h11_128bit, 8)); 1875 1876 const __m128i c1_128bit = _mm_add_epi64(_mm256_extracti128_si256(c1, 1), 1877 _mm256_castsi256_si128(c1)); 1878 const __m128i c1_val = _mm_add_epi64(c1_128bit, _mm_srli_si128(c1_128bit, 8)); 1879 1880 const __m128i c = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), c1_val); 1881 const __m128i h1x = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), h11_val); 1882 1883 xx_storeu_128(C, c); 1884 xx_storeu_128(H[1], h1x); 1885 1886 H[1][1] /= size; 1887 C[1] /= size; 1888 } 1889 1890 // AVX2 variant of av1_calc_proj_params_c. 
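// Conceptually, the helpers above accumulate (and normalize by the block
// size) the terms of the small least-squares system that the caller uses to
// derive the projection coefficients. An illustrative scalar form of the
// per-pixel accumulation (not part of the build; k, l index the enabled
// filters, 'px' a hypothetical pixel index) is:
//   u  = dat[px] << SGRPROJ_RST_BITS;
//   s  = (src[px] << SGRPROJ_RST_BITS) - u;
//   fk = fltk[px] - u;
//   H[k][l] += (int64_t)fk * fl;  C[k] += (int64_t)fk * s;
// with every accumulated entry finally divided by width * height.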
1891 void av1_calc_proj_params_avx2(const uint8_t *src8, int width, int height, 1892 int src_stride, const uint8_t *dat8, 1893 int dat_stride, int32_t *flt0, int flt0_stride, 1894 int32_t *flt1, int flt1_stride, int64_t H[2][2], 1895 int64_t C[2], const sgr_params_type *params) { 1896 if ((params->r[0] > 0) && (params->r[1] > 0)) { 1897 calc_proj_params_r0_r1_avx2(src8, width, height, src_stride, dat8, 1898 dat_stride, flt0, flt0_stride, flt1, 1899 flt1_stride, H, C); 1900 } else if (params->r[0] > 0) { 1901 calc_proj_params_r0_avx2(src8, width, height, src_stride, dat8, dat_stride, 1902 flt0, flt0_stride, H, C); 1903 } else if (params->r[1] > 0) { 1904 calc_proj_params_r1_avx2(src8, width, height, src_stride, dat8, dat_stride, 1905 flt1, flt1_stride, H, C); 1906 } 1907 } 1908 1909 #if CONFIG_AV1_HIGHBITDEPTH 1910 static inline void calc_proj_params_r0_r1_high_bd_avx2( 1911 const uint8_t *src8, int width, int height, int src_stride, 1912 const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride, 1913 int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) { 1914 const int size = width * height; 1915 const uint16_t *src = CONVERT_TO_SHORTPTR(src8); 1916 const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8); 1917 __m256i h00, h01, h11, c0, c1; 1918 const __m256i zero = _mm256_setzero_si256(); 1919 h01 = h11 = c0 = c1 = h00 = zero; 1920 1921 for (int i = 0; i < height; ++i) { 1922 for (int j = 0; j < width; j += 8) { 1923 const __m256i u_load = _mm256_cvtepu16_epi32( 1924 _mm_load_si128((__m128i *)(dat + i * dat_stride + j))); 1925 const __m256i s_load = _mm256_cvtepu16_epi32( 1926 _mm_load_si128((__m128i *)(src + i * src_stride + j))); 1927 __m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j)); 1928 __m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j)); 1929 __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS); 1930 __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS); 1931 s = _mm256_sub_epi32(s, d); 1932 f1 = _mm256_sub_epi32(f1, d); 1933 f2 = _mm256_sub_epi32(f2, d); 1934 1935 const __m256i h00_even = _mm256_mul_epi32(f1, f1); 1936 const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), 1937 _mm256_srli_epi64(f1, 32)); 1938 h00 = _mm256_add_epi64(h00, h00_even); 1939 h00 = _mm256_add_epi64(h00, h00_odd); 1940 1941 const __m256i h01_even = _mm256_mul_epi32(f1, f2); 1942 const __m256i h01_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), 1943 _mm256_srli_epi64(f2, 32)); 1944 h01 = _mm256_add_epi64(h01, h01_even); 1945 h01 = _mm256_add_epi64(h01, h01_odd); 1946 1947 const __m256i h11_even = _mm256_mul_epi32(f2, f2); 1948 const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), 1949 _mm256_srli_epi64(f2, 32)); 1950 h11 = _mm256_add_epi64(h11, h11_even); 1951 h11 = _mm256_add_epi64(h11, h11_odd); 1952 1953 const __m256i c0_even = _mm256_mul_epi32(f1, s); 1954 const __m256i c0_odd = 1955 _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32)); 1956 c0 = _mm256_add_epi64(c0, c0_even); 1957 c0 = _mm256_add_epi64(c0, c0_odd); 1958 1959 const __m256i c1_even = _mm256_mul_epi32(f2, s); 1960 const __m256i c1_odd = 1961 _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32)); 1962 c1 = _mm256_add_epi64(c1, c1_even); 1963 c1 = _mm256_add_epi64(c1, c1_odd); 1964 } 1965 } 1966 1967 __m256i c_low = _mm256_unpacklo_epi64(c0, c1); 1968 const __m256i c_high = _mm256_unpackhi_epi64(c0, c1); 1969 c_low = _mm256_add_epi64(c_low, c_high); 1970 const __m128i c_128bit = 
_mm_add_epi64(_mm256_extracti128_si256(c_low, 1), 1971 _mm256_castsi256_si128(c_low)); 1972 1973 __m256i h0x_low = _mm256_unpacklo_epi64(h00, h01); 1974 const __m256i h0x_high = _mm256_unpackhi_epi64(h00, h01); 1975 h0x_low = _mm256_add_epi64(h0x_low, h0x_high); 1976 const __m128i h0x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h0x_low, 1), 1977 _mm256_castsi256_si128(h0x_low)); 1978 1979 // Using the symmetric properties of H, calculations of H[1][0] are not 1980 // needed. 1981 __m256i h1x_low = _mm256_unpacklo_epi64(zero, h11); 1982 const __m256i h1x_high = _mm256_unpackhi_epi64(zero, h11); 1983 h1x_low = _mm256_add_epi64(h1x_low, h1x_high); 1984 const __m128i h1x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h1x_low, 1), 1985 _mm256_castsi256_si128(h1x_low)); 1986 1987 xx_storeu_128(C, c_128bit); 1988 xx_storeu_128(H[0], h0x_128bit); 1989 xx_storeu_128(H[1], h1x_128bit); 1990 1991 H[0][0] /= size; 1992 H[0][1] /= size; 1993 H[1][1] /= size; 1994 1995 // Since H is a symmetric matrix 1996 H[1][0] = H[0][1]; 1997 C[0] /= size; 1998 C[1] /= size; 1999 } 2000 2001 static inline void calc_proj_params_r0_high_bd_avx2( 2002 const uint8_t *src8, int width, int height, int src_stride, 2003 const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride, 2004 int64_t H[2][2], int64_t C[2]) { 2005 const int size = width * height; 2006 const uint16_t *src = CONVERT_TO_SHORTPTR(src8); 2007 const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8); 2008 __m256i h00, c0; 2009 const __m256i zero = _mm256_setzero_si256(); 2010 c0 = h00 = zero; 2011 2012 for (int i = 0; i < height; ++i) { 2013 for (int j = 0; j < width; j += 8) { 2014 const __m256i u_load = _mm256_cvtepu16_epi32( 2015 _mm_load_si128((__m128i *)(dat + i * dat_stride + j))); 2016 const __m256i s_load = _mm256_cvtepu16_epi32( 2017 _mm_load_si128((__m128i *)(src + i * src_stride + j))); 2018 __m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j)); 2019 __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS); 2020 __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS); 2021 s = _mm256_sub_epi32(s, d); 2022 f1 = _mm256_sub_epi32(f1, d); 2023 2024 const __m256i h00_even = _mm256_mul_epi32(f1, f1); 2025 const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), 2026 _mm256_srli_epi64(f1, 32)); 2027 h00 = _mm256_add_epi64(h00, h00_even); 2028 h00 = _mm256_add_epi64(h00, h00_odd); 2029 2030 const __m256i c0_even = _mm256_mul_epi32(f1, s); 2031 const __m256i c0_odd = 2032 _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32)); 2033 c0 = _mm256_add_epi64(c0, c0_even); 2034 c0 = _mm256_add_epi64(c0, c0_odd); 2035 } 2036 } 2037 const __m128i h00_128bit = _mm_add_epi64(_mm256_extracti128_si256(h00, 1), 2038 _mm256_castsi256_si128(h00)); 2039 const __m128i h00_val = 2040 _mm_add_epi64(h00_128bit, _mm_srli_si128(h00_128bit, 8)); 2041 2042 const __m128i c0_128bit = _mm_add_epi64(_mm256_extracti128_si256(c0, 1), 2043 _mm256_castsi256_si128(c0)); 2044 const __m128i c0_val = _mm_add_epi64(c0_128bit, _mm_srli_si128(c0_128bit, 8)); 2045 2046 const __m128i c = _mm_unpacklo_epi64(c0_val, _mm256_castsi256_si128(zero)); 2047 const __m128i h0x = _mm_unpacklo_epi64(h00_val, _mm256_castsi256_si128(zero)); 2048 2049 xx_storeu_128(C, c); 2050 xx_storeu_128(H[0], h0x); 2051 2052 H[0][0] /= size; 2053 C[0] /= size; 2054 } 2055 2056 static inline void calc_proj_params_r1_high_bd_avx2( 2057 const uint8_t *src8, int width, int height, int src_stride, 2058 const uint8_t *dat8, int dat_stride, int32_t *flt1, int flt1_stride, 2059 
int64_t H[2][2], int64_t C[2]) { 2060 const int size = width * height; 2061 const uint16_t *src = CONVERT_TO_SHORTPTR(src8); 2062 const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8); 2063 __m256i h11, c1; 2064 const __m256i zero = _mm256_setzero_si256(); 2065 c1 = h11 = zero; 2066 2067 for (int i = 0; i < height; ++i) { 2068 for (int j = 0; j < width; j += 8) { 2069 const __m256i u_load = _mm256_cvtepu16_epi32( 2070 _mm_load_si128((__m128i *)(dat + i * dat_stride + j))); 2071 const __m256i s_load = _mm256_cvtepu16_epi32( 2072 _mm_load_si128((__m128i *)(src + i * src_stride + j))); 2073 __m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j)); 2074 __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS); 2075 __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS); 2076 s = _mm256_sub_epi32(s, d); 2077 f2 = _mm256_sub_epi32(f2, d); 2078 2079 const __m256i h11_even = _mm256_mul_epi32(f2, f2); 2080 const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), 2081 _mm256_srli_epi64(f2, 32)); 2082 h11 = _mm256_add_epi64(h11, h11_even); 2083 h11 = _mm256_add_epi64(h11, h11_odd); 2084 2085 const __m256i c1_even = _mm256_mul_epi32(f2, s); 2086 const __m256i c1_odd = 2087 _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32)); 2088 c1 = _mm256_add_epi64(c1, c1_even); 2089 c1 = _mm256_add_epi64(c1, c1_odd); 2090 } 2091 } 2092 2093 const __m128i h11_128bit = _mm_add_epi64(_mm256_extracti128_si256(h11, 1), 2094 _mm256_castsi256_si128(h11)); 2095 const __m128i h11_val = 2096 _mm_add_epi64(h11_128bit, _mm_srli_si128(h11_128bit, 8)); 2097 2098 const __m128i c1_128bit = _mm_add_epi64(_mm256_extracti128_si256(c1, 1), 2099 _mm256_castsi256_si128(c1)); 2100 const __m128i c1_val = _mm_add_epi64(c1_128bit, _mm_srli_si128(c1_128bit, 8)); 2101 2102 const __m128i c = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), c1_val); 2103 const __m128i h1x = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), h11_val); 2104 2105 xx_storeu_128(C, c); 2106 xx_storeu_128(H[1], h1x); 2107 2108 H[1][1] /= size; 2109 C[1] /= size; 2110 } 2111 2112 // AVX2 variant of av1_calc_proj_params_high_bd_c. 
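// The high-bitdepth helpers above perform the same accumulation as their
// low-bitdepth counterparts; they differ only in reading 16-bit samples (via
// CONVERT_TO_SHORTPTR) with _mm_load_si128 and widening them with
// _mm256_cvtepu16_epi32 instead of widening 8-bit samples with
// _mm256_cvtepu8_epi32.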
2113 void av1_calc_proj_params_high_bd_avx2(const uint8_t *src8, int width, 2114 int height, int src_stride, 2115 const uint8_t *dat8, int dat_stride, 2116 int32_t *flt0, int flt0_stride, 2117 int32_t *flt1, int flt1_stride, 2118 int64_t H[2][2], int64_t C[2], 2119 const sgr_params_type *params) { 2120 if ((params->r[0] > 0) && (params->r[1] > 0)) { 2121 calc_proj_params_r0_r1_high_bd_avx2(src8, width, height, src_stride, dat8, 2122 dat_stride, flt0, flt0_stride, flt1, 2123 flt1_stride, H, C); 2124 } else if (params->r[0] > 0) { 2125 calc_proj_params_r0_high_bd_avx2(src8, width, height, src_stride, dat8, 2126 dat_stride, flt0, flt0_stride, H, C); 2127 } else if (params->r[1] > 0) { 2128 calc_proj_params_r1_high_bd_avx2(src8, width, height, src_stride, dat8, 2129 dat_stride, flt1, flt1_stride, H, C); 2130 } 2131 } 2132 2133 int64_t av1_highbd_pixel_proj_error_avx2( 2134 const uint8_t *src8, int width, int height, int src_stride, 2135 const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride, 2136 int32_t *flt1, int flt1_stride, int xq[2], const sgr_params_type *params) { 2137 int i, j, k; 2138 const int32_t shift = SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS; 2139 const __m256i rounding = _mm256_set1_epi32(1 << (shift - 1)); 2140 __m256i sum64 = _mm256_setzero_si256(); 2141 const uint16_t *src = CONVERT_TO_SHORTPTR(src8); 2142 const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8); 2143 int64_t err = 0; 2144 if (params->r[0] > 0 && params->r[1] > 0) { // Both filters are enabled 2145 const __m256i xq0 = _mm256_set1_epi32(xq[0]); 2146 const __m256i xq1 = _mm256_set1_epi32(xq[1]); 2147 for (i = 0; i < height; ++i) { 2148 __m256i sum32 = _mm256_setzero_si256(); 2149 for (j = 0; j <= width - 16; j += 16) { // Process 16 pixels at a time 2150 // Load 16 pixels each from source image and corrupted image 2151 const __m256i s0 = yy_loadu_256(src + j); 2152 const __m256i d0 = yy_loadu_256(dat + j); 2153 // s0 = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16 (indices) 2154 2155 // Shift-up each pixel to match filtered image scaling 2156 const __m256i u0 = _mm256_slli_epi16(d0, SGRPROJ_RST_BITS); 2157 2158 // Split u0 into two halves and pad each from u16 to i32 2159 const __m256i u0l = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(u0)); 2160 const __m256i u0h = 2161 _mm256_cvtepu16_epi32(_mm256_extracti128_si256(u0, 1)); 2162 // u0h, u0l = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as u32 2163 2164 // Load 16 pixels from each filtered image 2165 const __m256i flt0l = yy_loadu_256(flt0 + j); 2166 const __m256i flt0h = yy_loadu_256(flt0 + j + 8); 2167 const __m256i flt1l = yy_loadu_256(flt1 + j); 2168 const __m256i flt1h = yy_loadu_256(flt1 + j + 8); 2169 // flt?l, flt?h = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as u32 2170 2171 // Subtract shifted corrupt image from each filtered image 2172 const __m256i flt0l_subu = _mm256_sub_epi32(flt0l, u0l); 2173 const __m256i flt0h_subu = _mm256_sub_epi32(flt0h, u0h); 2174 const __m256i flt1l_subu = _mm256_sub_epi32(flt1l, u0l); 2175 const __m256i flt1h_subu = _mm256_sub_epi32(flt1h, u0h); 2176 2177 // Multiply basis vectors by appropriate coefficients 2178 const __m256i v0l = _mm256_mullo_epi32(flt0l_subu, xq0); 2179 const __m256i v0h = _mm256_mullo_epi32(flt0h_subu, xq0); 2180 const __m256i v1l = _mm256_mullo_epi32(flt1l_subu, xq1); 2181 const __m256i v1h = _mm256_mullo_epi32(flt1h_subu, xq1); 2182 2183 // Add together the contributions from the two basis vectors 2184 const __m256i vl = _mm256_add_epi32(v0l, v1l); 2185 const __m256i vh = 
_mm256_add_epi32(v0h, v1h); 2186 2187 // Right-shift v with appropriate rounding 2188 const __m256i vrl = 2189 _mm256_srai_epi32(_mm256_add_epi32(vl, rounding), shift); 2190 const __m256i vrh = 2191 _mm256_srai_epi32(_mm256_add_epi32(vh, rounding), shift); 2192 // vrh, vrl = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] 2193 2194 // Saturate each i32 to an i16 then combine both halves 2195 // The permute (control=[3 1 2 0]) fixes weird ordering from AVX lanes 2196 const __m256i vr = 2197 _mm256_permute4x64_epi64(_mm256_packs_epi32(vrl, vrh), 0xd8); 2198 // intermediate = [15 14 13 12 7 6 5 4] [11 10 9 8 3 2 1 0] 2199 // vr = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] 2200 2201 // Add twin-subspace-sgr-filter to corrupt image then subtract source 2202 const __m256i e0 = _mm256_sub_epi16(_mm256_add_epi16(vr, d0), s0); 2203 2204 // Calculate squared error and add adjacent values 2205 const __m256i err0 = _mm256_madd_epi16(e0, e0); 2206 2207 sum32 = _mm256_add_epi32(sum32, err0); 2208 } 2209 2210 const __m256i sum32l = 2211 _mm256_cvtepu32_epi64(_mm256_castsi256_si128(sum32)); 2212 sum64 = _mm256_add_epi64(sum64, sum32l); 2213 const __m256i sum32h = 2214 _mm256_cvtepu32_epi64(_mm256_extracti128_si256(sum32, 1)); 2215 sum64 = _mm256_add_epi64(sum64, sum32h); 2216 2217 // Process remaining pixels in this row (modulo 16) 2218 for (k = j; k < width; ++k) { 2219 const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS); 2220 int32_t v = xq[0] * (flt0[k] - u) + xq[1] * (flt1[k] - u); 2221 const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k]; 2222 err += ((int64_t)e * e); 2223 } 2224 dat += dat_stride; 2225 src += src_stride; 2226 flt0 += flt0_stride; 2227 flt1 += flt1_stride; 2228 } 2229 } else if (params->r[0] > 0 || params->r[1] > 0) { // Only one filter enabled 2230 const int32_t xq_on = (params->r[0] > 0) ? xq[0] : xq[1]; 2231 const __m256i xq_active = _mm256_set1_epi32(xq_on); 2232 const __m256i xq_inactive = 2233 _mm256_set1_epi32(-xq_on * (1 << SGRPROJ_RST_BITS)); 2234 const int32_t *flt = (params->r[0] > 0) ? flt0 : flt1; 2235 const int flt_stride = (params->r[0] > 0) ? 
flt0_stride : flt1_stride; 2236 for (i = 0; i < height; ++i) { 2237 __m256i sum32 = _mm256_setzero_si256(); 2238 for (j = 0; j <= width - 16; j += 16) { 2239 // Load 16 pixels from source image 2240 const __m256i s0 = yy_loadu_256(src + j); 2241 // s0 = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16 2242 2243 // Load 16 pixels from corrupted image and pad each u16 to i32 2244 const __m256i d0 = yy_loadu_256(dat + j); 2245 const __m256i d0h = 2246 _mm256_cvtepu16_epi32(_mm256_extracti128_si256(d0, 1)); 2247 const __m256i d0l = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(d0)); 2248 // d0 = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16 2249 // d0h, d0l = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as i32 2250 2251 // Load 16 pixels from the filtered image 2252 const __m256i flth = yy_loadu_256(flt + j + 8); 2253 const __m256i fltl = yy_loadu_256(flt + j); 2254 // flth, fltl = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as i32 2255 2256 const __m256i flth_xq = _mm256_mullo_epi32(flth, xq_active); 2257 const __m256i fltl_xq = _mm256_mullo_epi32(fltl, xq_active); 2258 const __m256i d0h_xq = _mm256_mullo_epi32(d0h, xq_inactive); 2259 const __m256i d0l_xq = _mm256_mullo_epi32(d0l, xq_inactive); 2260 2261 const __m256i vh = _mm256_add_epi32(flth_xq, d0h_xq); 2262 const __m256i vl = _mm256_add_epi32(fltl_xq, d0l_xq); 2263 2264 // Shift this down with appropriate rounding 2265 const __m256i vrh = 2266 _mm256_srai_epi32(_mm256_add_epi32(vh, rounding), shift); 2267 const __m256i vrl = 2268 _mm256_srai_epi32(_mm256_add_epi32(vl, rounding), shift); 2269 // vrh, vrl = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as i32 2270 2271 // Saturate each i32 to an i16 then combine both halves 2272 // The permute (control=[3 1 2 0]) fixes weird ordering from AVX lanes 2273 const __m256i vr = 2274 _mm256_permute4x64_epi64(_mm256_packs_epi32(vrl, vrh), 0xd8); 2275 // intermediate = [15 14 13 12 7 6 5 4] [11 10 9 8 3 2 1 0] as u16 2276 // vr = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16 2277 2278 // Subtract twin-subspace-sgr filtered from source image to get error 2279 const __m256i e0 = _mm256_sub_epi16(_mm256_add_epi16(vr, d0), s0); 2280 2281 // Calculate squared error and add adjacent values 2282 const __m256i err0 = _mm256_madd_epi16(e0, e0); 2283 2284 sum32 = _mm256_add_epi32(sum32, err0); 2285 } 2286 2287 const __m256i sum32l = 2288 _mm256_cvtepu32_epi64(_mm256_castsi256_si128(sum32)); 2289 sum64 = _mm256_add_epi64(sum64, sum32l); 2290 const __m256i sum32h = 2291 _mm256_cvtepu32_epi64(_mm256_extracti128_si256(sum32, 1)); 2292 sum64 = _mm256_add_epi64(sum64, sum32h); 2293 2294 // Process remaining pixels in this row (modulo 16) 2295 for (k = j; k < width; ++k) { 2296 const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS); 2297 int32_t v = xq_on * (flt[k] - u); 2298 const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k]; 2299 err += ((int64_t)e * e); 2300 } 2301 dat += dat_stride; 2302 src += src_stride; 2303 flt += flt_stride; 2304 } 2305 } else { // Neither filter is enabled 2306 for (i = 0; i < height; ++i) { 2307 __m256i sum32 = _mm256_setzero_si256(); 2308 for (j = 0; j <= width - 32; j += 32) { 2309 // Load 2x16 u16 from source image 2310 const __m256i s0l = yy_loadu_256(src + j); 2311 const __m256i s0h = yy_loadu_256(src + j + 16); 2312 2313 // Load 2x16 u16 from corrupted image 2314 const __m256i d0l = yy_loadu_256(dat + j); 2315 const __m256i d0h = yy_loadu_256(dat + j + 16); 2316 2317 // Subtract corrupted image from source image 2318 const __m256i diffl = 
            _mm256_sub_epi16(d0l, s0l);
        const __m256i diffh = _mm256_sub_epi16(d0h, s0h);

        // Square error and add adjacent values
        const __m256i err0l = _mm256_madd_epi16(diffl, diffl);
        const __m256i err0h = _mm256_madd_epi16(diffh, diffh);

        sum32 = _mm256_add_epi32(sum32, err0l);
        sum32 = _mm256_add_epi32(sum32, err0h);
      }

      const __m256i sum32l =
          _mm256_cvtepu32_epi64(_mm256_castsi256_si128(sum32));
      sum64 = _mm256_add_epi64(sum64, sum32l);
      const __m256i sum32h =
          _mm256_cvtepu32_epi64(_mm256_extracti128_si256(sum32, 1));
      sum64 = _mm256_add_epi64(sum64, sum32h);

      // Process remaining pixels in this row (modulo 32)
      for (k = j; k < width; ++k) {
        const int32_t e = (int32_t)(dat[k]) - src[k];
        err += ((int64_t)e * e);
      }
      dat += dat_stride;
      src += src_stride;
    }
  }

  // Sum the four 64-bit lanes of sum64 into err
  int64_t sum[4];
  yy_storeu_256(sum, sum64);
  err += sum[0] + sum[1] + sum[2] + sum[3];
  return err;
}
#endif  // CONFIG_AV1_HIGHBITDEPTH