/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AV1_COMMON_CDEF_BLOCK_SIMD_H_
#define AOM_AV1_COMMON_CDEF_BLOCK_SIMD_H_

#include "config/aom_config.h"
#include "config/av1_rtcd.h"

#include "av1/common/cdef_block.h"

/* partial A is a vector of eight 16-bit values of the form:
   [x8 x7 x6 x5 x4 x3 x2 x1] and partial B has the form:
   [0  y1 y2 y3 y4 y5 y6 y7].
   This function computes (x1^2+y1^2)*C1 + (x2^2+y2^2)*C2 + ...
   (x7^2+y7^2)*C7 + (x8^2+0^2)*C8 where the C1..C8 constants are in const1
   and const2. */
static inline v128 fold_mul_and_sum(v128 partiala, v128 partialb, v128 const1,
                                    v128 const2) {
  v128 tmp;
  /* Reverse partial B. */
  partialb = v128_shuffle_8(
      partialb, v128_from_32(0x0f0e0100, 0x03020504, 0x07060908, 0x0b0a0d0c));
  /* Interleave the x and y values of identical indices and pair x8 with 0. */
  tmp = partiala;
  partiala = v128_ziplo_16(partialb, partiala);
  partialb = v128_ziphi_16(partialb, tmp);
  /* Square and add the corresponding x and y values. */
  partiala = v128_madd_s16(partiala, partiala);
  partialb = v128_madd_s16(partialb, partialb);
  /* Multiply by constant. */
  partiala = v128_mullo_s32(partiala, const1);
  partialb = v128_mullo_s32(partialb, const2);
  /* Sum all results. */
  partiala = v128_add_32(partiala, partialb);
  return partiala;
}
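/* A scalar sketch of the reduction above (illustrative only, not compiled;
   the lane placement of the four 32-bit partial results is an implementation
   detail that hsum4() below finishes off). C1..C4 are the lanes of const1
   and C5..C8 the lanes of const2. */
#if 0
static int64_t fold_mul_and_sum_scalar(const int16_t x[8], const int16_t y[8],
                                       const int32_t c[8]) {
  int64_t sum = 0;
  /* y[7] plays the role of the zero paired with x8. */
  for (int k = 0; k < 8; k++)
    sum += ((int64_t)x[k] * x[k] + (int64_t)y[k] * y[k]) * c[k];
  return sum;
}
#endif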
/* Transpose the four 32-bit lanes of x0..x3 and add them, so that lane k of
   the result holds the horizontal sum of xk. */
static inline v128 hsum4(v128 x0, v128 x1, v128 x2, v128 x3) {
  v128 t0, t1, t2, t3;
  t0 = v128_ziplo_32(x1, x0);
  t1 = v128_ziplo_32(x3, x2);
  t2 = v128_ziphi_32(x1, x0);
  t3 = v128_ziphi_32(x3, x2);
  x0 = v128_ziplo_64(t1, t0);
  x1 = v128_ziphi_64(t1, t0);
  x2 = v128_ziplo_64(t3, t2);
  x3 = v128_ziphi_64(t3, t2);
  return v128_add_32(v128_add_32(x0, x1), v128_add_32(x2, x3));
}

/* Computes cost for directions 4, 5, 6 and 7. We can call this function
   again (after transposing the input) to compute the remaining directions. */
static inline v128 compute_directions(v128 lines[8], int32_t tmp_cost1[4]) {
  v128 partial4a, partial4b, partial5a, partial5b, partial7a, partial7b;
  v128 partial6;
  v128 tmp;
  /* Partial sums for lines 0 and 1. */
  partial4a = v128_shl_n_byte(lines[0], 14);
  partial4b = v128_shr_n_byte(lines[0], 2);
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[1], 12));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[1], 4));
  tmp = v128_add_16(lines[0], lines[1]);
  partial5a = v128_shl_n_byte(tmp, 10);
  partial5b = v128_shr_n_byte(tmp, 6);
  partial7a = v128_shl_n_byte(tmp, 4);
  partial7b = v128_shr_n_byte(tmp, 12);
  partial6 = tmp;

  /* Partial sums for lines 2 and 3. */
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[2], 10));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[2], 6));
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[3], 8));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[3], 8));
  tmp = v128_add_16(lines[2], lines[3]);
  partial5a = v128_add_16(partial5a, v128_shl_n_byte(tmp, 8));
  partial5b = v128_add_16(partial5b, v128_shr_n_byte(tmp, 8));
  partial7a = v128_add_16(partial7a, v128_shl_n_byte(tmp, 6));
  partial7b = v128_add_16(partial7b, v128_shr_n_byte(tmp, 10));
  partial6 = v128_add_16(partial6, tmp);

  /* Partial sums for lines 4 and 5. */
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[4], 6));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[4], 10));
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[5], 4));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[5], 12));
  tmp = v128_add_16(lines[4], lines[5]);
  partial5a = v128_add_16(partial5a, v128_shl_n_byte(tmp, 6));
  partial5b = v128_add_16(partial5b, v128_shr_n_byte(tmp, 10));
  partial7a = v128_add_16(partial7a, v128_shl_n_byte(tmp, 8));
  partial7b = v128_add_16(partial7b, v128_shr_n_byte(tmp, 8));
  partial6 = v128_add_16(partial6, tmp);

  /* Partial sums for lines 6 and 7. */
  partial4a = v128_add_16(partial4a, v128_shl_n_byte(lines[6], 2));
  partial4b = v128_add_16(partial4b, v128_shr_n_byte(lines[6], 14));
  partial4a = v128_add_16(partial4a, lines[7]);
  tmp = v128_add_16(lines[6], lines[7]);
  partial5a = v128_add_16(partial5a, v128_shl_n_byte(tmp, 4));
  partial5b = v128_add_16(partial5b, v128_shr_n_byte(tmp, 12));
  partial7a = v128_add_16(partial7a, v128_shl_n_byte(tmp, 10));
  partial7b = v128_add_16(partial7b, v128_shr_n_byte(tmp, 6));
  partial6 = v128_add_16(partial6, tmp);

  /* Compute costs in terms of partial sums. */
  partial4a =
      fold_mul_and_sum(partial4a, partial4b, v128_from_32(210, 280, 420, 840),
                       v128_from_32(105, 120, 140, 168));
  partial7a =
      fold_mul_and_sum(partial7a, partial7b, v128_from_32(210, 420, 0, 0),
                       v128_from_32(105, 105, 105, 140));
  partial5a =
      fold_mul_and_sum(partial5a, partial5b, v128_from_32(210, 420, 0, 0),
                       v128_from_32(105, 105, 105, 140));
  partial6 = v128_madd_s16(partial6, partial6);
  partial6 = v128_mullo_s32(partial6, v128_dup_32(105));

  partial4a = hsum4(partial4a, partial5a, partial6, partial7a);
  v128_store_unaligned(tmp_cost1, partial4a);
  return partial4a;
}
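/* How the byte shifts in compute_directions() work (an illustrative note):
   shifting line k by a different multiple of two bytes before adding aligns
   pixels that lie on the same diagonal into the same 16-bit lane, so the
   vector additions build per-diagonal sums. For a 45-degree direction this
   amounts to the scalar accumulation

     for (i = 0; i < 8; i++)
       for (j = 0; j < 8; j++) partial[i + j] += px[i][j];

   (the mirrored diagonal accumulates at 7 + i - j instead), where px[][] are
   the bias-removed pixels loaded into lines[]. The direction cost is then
   sum_k partial[k]^2 * 840 / len(k), with len(k) the number of pixels on
   diagonal k -- these 840/len(k) weights are exactly the constants handed to
   fold_mul_and_sum() above. */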
/* Transpose and reverse the order of the lines -- equivalent to a 90-degree
   counter-clockwise rotation of the pixels. */
static inline void array_reverse_transpose_8x8(v128 *in, v128 *res) {
  const v128 tr0_0 = v128_ziplo_16(in[1], in[0]);
  const v128 tr0_1 = v128_ziplo_16(in[3], in[2]);
  const v128 tr0_2 = v128_ziphi_16(in[1], in[0]);
  const v128 tr0_3 = v128_ziphi_16(in[3], in[2]);
  const v128 tr0_4 = v128_ziplo_16(in[5], in[4]);
  const v128 tr0_5 = v128_ziplo_16(in[7], in[6]);
  const v128 tr0_6 = v128_ziphi_16(in[5], in[4]);
  const v128 tr0_7 = v128_ziphi_16(in[7], in[6]);

  const v128 tr1_0 = v128_ziplo_32(tr0_1, tr0_0);
  const v128 tr1_1 = v128_ziplo_32(tr0_5, tr0_4);
  const v128 tr1_2 = v128_ziphi_32(tr0_1, tr0_0);
  const v128 tr1_3 = v128_ziphi_32(tr0_5, tr0_4);
  const v128 tr1_4 = v128_ziplo_32(tr0_3, tr0_2);
  const v128 tr1_5 = v128_ziplo_32(tr0_7, tr0_6);
  const v128 tr1_6 = v128_ziphi_32(tr0_3, tr0_2);
  const v128 tr1_7 = v128_ziphi_32(tr0_7, tr0_6);

  res[7] = v128_ziplo_64(tr1_1, tr1_0);
  res[6] = v128_ziphi_64(tr1_1, tr1_0);
  res[5] = v128_ziplo_64(tr1_3, tr1_2);
  res[4] = v128_ziphi_64(tr1_3, tr1_2);
  res[3] = v128_ziplo_64(tr1_5, tr1_4);
  res[2] = v128_ziphi_64(tr1_5, tr1_4);
  res[1] = v128_ziplo_64(tr1_7, tr1_6);
  res[0] = v128_ziphi_64(tr1_7, tr1_6);
}

int SIMD_FUNC(cdef_find_dir)(const uint16_t *img, int stride, int32_t *var,
                             int coeff_shift) {
  int i;
  int32_t cost[8];
  int32_t best_cost = 0;
  int best_dir = 0;
  v128 lines[8];
  for (i = 0; i < 8; i++) {
    lines[i] = v128_load_unaligned(&img[i * stride]);
    lines[i] =
        v128_sub_16(v128_shr_s16(lines[i], coeff_shift), v128_dup_16(128));
  }

  /* Compute "mostly vertical" directions. */
  v128 dir47 = compute_directions(lines, cost + 4);

  array_reverse_transpose_8x8(lines, lines);

  /* Compute "mostly horizontal" directions. */
  v128 dir03 = compute_directions(lines, cost);

  v128 max = v128_max_s32(dir03, dir47);
  max = v128_max_s32(max, v128_align(max, max, 8));
  max = v128_max_s32(max, v128_align(max, max, 4));
  best_cost = v128_low_u32(max);
  v128 t =
      v128_pack_s32_s16(v128_cmpeq_32(max, dir47), v128_cmpeq_32(max, dir03));
  best_dir = v128_movemask_8(v128_pack_s16_s8(t, t));
  best_dir = get_msb(best_dir ^ (best_dir - 1));  // Count trailing zeros

  /* Difference between the optimal variance and the variance along the
     orthogonal direction. Again, the sum(x^2) terms cancel out. */
  *var = best_cost - cost[(best_dir + 4) & 7];
  /* We'd normally divide by 840, but dividing by 1024 is close enough
     for what we're going to do with this. */
  *var >>= 10;
  return best_dir;
}
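/* Worked example of the trailing-zeros trick in cdef_find_dir() above: if the
   equality bitmask comes back as best_dir = 0b00101000 (directions 3 and 5
   both hit the maximum cost), then best_dir - 1 = 0b00100111 and
   best_dir ^ (best_dir - 1) = 0b00001111, whose most significant bit sits at
   index 3. get_msb() therefore returns the index of the lowest set bit, so
   ties resolve to the smallest matching direction. */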
// Work around compiler out of memory issues with Win32 builds. This issue has
// been observed with Visual Studio 2017, 2019, and 2022 (version 17.10.3).
#if defined(_MSC_VER) && defined(_M_IX86)
#define CDEF_INLINE static inline
#else
#define CDEF_INLINE SIMD_INLINE
#endif

// sign(a-b) * min(abs(a-b), max(0, threshold - (abs(a-b) >> adjdamp)))
CDEF_INLINE v256 constrain16(v256 a, v256 b, unsigned int threshold,
                             unsigned int adjdamp) {
  v256 diff = v256_sub_16(a, b);
  const v256 sign = v256_shr_n_s16(diff, 15);
  diff = v256_abs_s16(diff);
  const v256 s =
      v256_ssub_u16(v256_dup_16(threshold), v256_shr_u16(diff, adjdamp));
  return v256_xor(v256_add_16(sign, v256_min_s16(diff, s)), sign);
}
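/* Scalar equivalent of constrain16() for a single 16-bit lane (an
   illustrative sketch, not compiled into the build). In the vector code,
   v256_ssub_u16() supplies the max(0, ...) via unsigned saturating
   subtraction, and the final (m + sign) ^ sign applies the sign using the
   two's-complement identity (m - 1) ^ -1 == -m. */
#if 0
static int constrain_scalar(int a, int b, unsigned int threshold,
                            unsigned int adjdamp) {
  const int diff = a - b;
  const int adiff = diff < 0 ? -diff : diff;
  const int clipped = AOMMAX(0, (int)threshold - (adiff >> adjdamp));
  const int m = AOMMIN(adiff, clipped);
  return diff < 0 ? -m : m;
}
#endif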
SIMD_INLINE v256 get_max_primary(const int is_lowbd, v256 *tap, v256 max,
                                 v256 cdef_large_value_mask) {
  if (is_lowbd) {
    v256 max_u8;
    max_u8 = tap[0];
    max_u8 = v256_max_u8(max_u8, tap[1]);
    max_u8 = v256_max_u8(max_u8, tap[2]);
    max_u8 = v256_max_u8(max_u8, tap[3]);
    /* The source is 16 bits, however, we only really care about the lower
       8 bits. The upper 8 bits contain the "large" flag. After the final
       primary max has been calculated, zero out the upper 8 bits. Use this
       to find the "16 bit" max. */
    max = v256_max_s16(max, v256_and(max_u8, cdef_large_value_mask));
  } else {
    /* Convert CDEF_VERY_LARGE to 0 before calculating max. */
    max = v256_max_s16(max, v256_and(tap[0], cdef_large_value_mask));
    max = v256_max_s16(max, v256_and(tap[1], cdef_large_value_mask));
    max = v256_max_s16(max, v256_and(tap[2], cdef_large_value_mask));
    max = v256_max_s16(max, v256_and(tap[3], cdef_large_value_mask));
  }
  return max;
}

SIMD_INLINE v256 get_max_secondary(const int is_lowbd, v256 *tap, v256 max,
                                   v256 cdef_large_value_mask) {
  if (is_lowbd) {
    v256 max_u8;
    max_u8 = tap[0];
    max_u8 = v256_max_u8(max_u8, tap[1]);
    max_u8 = v256_max_u8(max_u8, tap[2]);
    max_u8 = v256_max_u8(max_u8, tap[3]);
    max_u8 = v256_max_u8(max_u8, tap[4]);
    max_u8 = v256_max_u8(max_u8, tap[5]);
    max_u8 = v256_max_u8(max_u8, tap[6]);
    max_u8 = v256_max_u8(max_u8, tap[7]);
    /* The source is 16 bits, however, we only really care about the lower
       8 bits. The upper 8 bits contain the "large" flag. After the final
       secondary max has been calculated, zero out the upper 8 bits. Use this
       to find the "16 bit" max. */
    max = v256_max_s16(max, v256_and(max_u8, cdef_large_value_mask));
  } else {
    /* Convert CDEF_VERY_LARGE to 0 before calculating max. */
    max = v256_max_s16(max, v256_and(tap[0], cdef_large_value_mask));
    max = v256_max_s16(max, v256_and(tap[1], cdef_large_value_mask));
    max = v256_max_s16(max, v256_and(tap[2], cdef_large_value_mask));
    max = v256_max_s16(max, v256_and(tap[3], cdef_large_value_mask));
    max = v256_max_s16(max, v256_and(tap[4], cdef_large_value_mask));
    max = v256_max_s16(max, v256_and(tap[5], cdef_large_value_mask));
    max = v256_max_s16(max, v256_and(tap[6], cdef_large_value_mask));
    max = v256_max_s16(max, v256_and(tap[7], cdef_large_value_mask));
  }
  return max;
}
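/* Note on the masking above (assuming CDEF_VERY_LARGE, defined in
   cdef_block.h, is a single-bit constant larger than any valid pixel value):
   padding pixels are stored as CDEF_VERY_LARGE so that they never win the
   min() used for clipping, and ANDing with ~CDEF_VERY_LARGE maps exactly that
   value to 0 -- eliminating it from the max() -- while leaving all valid
   pixel values untouched. */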
// MSVC takes far too much time optimizing these.
// https://bugs.chromium.org/p/aomedia/issues/detail?id=3395
#if defined(_MSC_VER) && !defined(__clang__)
#pragma optimize("", off)
#endif

CDEF_INLINE void filter_block_4x4(const int is_lowbd, void *dest, int dstride,
                                  const uint16_t *in, int pri_strength,
                                  int sec_strength, int dir, int pri_damping,
                                  int sec_damping, int coeff_shift, int height,
                                  int enable_primary, int enable_secondary) {
  uint8_t *dst8 = (uint8_t *)dest;
  uint16_t *dst16 = (uint16_t *)dest;
  const int clipping_required = enable_primary && enable_secondary;
  v256 p0, p1, p2, p3;
  v256 sum, row, res;
  v256 max, min;
  const v256 cdef_large_value_mask = v256_dup_16((uint16_t)~CDEF_VERY_LARGE);
  /* Tap offsets: po1/po2 step along the primary direction, while s1o1/s1o2
     and s2o1/s2o2 step along the two secondary directions, 45 degrees to
     either side of it. */
  const int po1 = cdef_directions[dir][0];
  const int po2 = cdef_directions[dir][1];
  const int s1o1 = cdef_directions[dir + 2][0];
  const int s1o2 = cdef_directions[dir + 2][1];
  const int s2o1 = cdef_directions[dir - 2][0];
  const int s2o2 = cdef_directions[dir - 2][1];
  const int *pri_taps = cdef_pri_taps[(pri_strength >> coeff_shift) & 1];
  const int *sec_taps = cdef_sec_taps;
  int i;

  if (enable_primary && pri_strength)
    pri_damping = AOMMAX(0, pri_damping - get_msb(pri_strength));
  if (enable_secondary && sec_strength)
    sec_damping = AOMMAX(0, sec_damping - get_msb(sec_strength));

  for (i = 0; i < height; i += 4) {
    sum = v256_zero();
    row = v256_from_v64(v64_load_aligned(&in[(i + 0) * CDEF_BSTRIDE]),
                        v64_load_aligned(&in[(i + 1) * CDEF_BSTRIDE]),
                        v64_load_aligned(&in[(i + 2) * CDEF_BSTRIDE]),
                        v64_load_aligned(&in[(i + 3) * CDEF_BSTRIDE]));
    max = min = row;

    if (enable_primary) {
      v256 tap[4];
      // Primary near taps
      tap[0] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE + po1]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + po1]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE + po1]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE + po1]));
      p0 = constrain16(tap[0], row, pri_strength, pri_damping);
      tap[1] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE - po1]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - po1]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE - po1]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE - po1]));
      p1 = constrain16(tap[1], row, pri_strength, pri_damping);

      // sum += pri_taps[0] * (p0 + p1)
      sum = v256_add_16(
          sum, v256_mullo_s16(v256_dup_16(pri_taps[0]), v256_add_16(p0, p1)));

      // Primary far taps
      tap[2] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE + po2]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + po2]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE + po2]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE + po2]));
      p0 = constrain16(tap[2], row, pri_strength, pri_damping);
      tap[3] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE - po2]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - po2]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE - po2]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE - po2]));
      p1 = constrain16(tap[3], row, pri_strength, pri_damping);

      // sum += pri_taps[1] * (p0 + p1)
      sum = v256_add_16(
          sum, v256_mullo_s16(v256_dup_16(pri_taps[1]), v256_add_16(p0, p1)));
      if (clipping_required) {
        max = get_max_primary(is_lowbd, tap, max, cdef_large_value_mask);

        min = v256_min_s16(min, tap[0]);
        min = v256_min_s16(min, tap[1]);
        min = v256_min_s16(min, tap[2]);
        min = v256_min_s16(min, tap[3]);
      }
    }

    if (enable_secondary) {
      v256 tap[8];
      // Secondary near taps
      tap[0] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE + s1o1]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + s1o1]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE + s1o1]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE + s1o1]));
      p0 = constrain16(tap[0], row, sec_strength, sec_damping);
      tap[1] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE - s1o1]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - s1o1]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE - s1o1]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE - s1o1]));
      p1 = constrain16(tap[1], row, sec_strength, sec_damping);
      tap[2] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE + s2o1]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + s2o1]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE + s2o1]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE + s2o1]));
      p2 = constrain16(tap[2], row, sec_strength, sec_damping);
      tap[3] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE - s2o1]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - s2o1]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE - s2o1]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE - s2o1]));
      p3 = constrain16(tap[3], row, sec_strength, sec_damping);

      // sum += sec_taps[0] * (p0 + p1 + p2 + p3)
      sum = v256_add_16(sum, v256_mullo_s16(v256_dup_16(sec_taps[0]),
                                            v256_add_16(v256_add_16(p0, p1),
                                                        v256_add_16(p2, p3))));

      // Secondary far taps
      tap[4] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE + s1o2]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + s1o2]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE + s1o2]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE + s1o2]));
      p0 = constrain16(tap[4], row, sec_strength, sec_damping);
      tap[5] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE - s1o2]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - s1o2]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE - s1o2]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE - s1o2]));
      p1 = constrain16(tap[5], row, sec_strength, sec_damping);
      tap[6] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE + s2o2]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + s2o2]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE + s2o2]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE + s2o2]));
      p2 = constrain16(tap[6], row, sec_strength, sec_damping);
      tap[7] =
          v256_from_v64(v64_load_unaligned(&in[(i + 0) * CDEF_BSTRIDE - s2o2]),
                        v64_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - s2o2]),
                        v64_load_unaligned(&in[(i + 2) * CDEF_BSTRIDE - s2o2]),
                        v64_load_unaligned(&in[(i + 3) * CDEF_BSTRIDE - s2o2]));
      p3 = constrain16(tap[7], row, sec_strength, sec_damping);

      // sum += sec_taps[1] * (p0 + p1 + p2 + p3)
      sum = v256_add_16(sum, v256_mullo_s16(v256_dup_16(sec_taps[1]),
                                            v256_add_16(v256_add_16(p0, p1),
                                                        v256_add_16(p2, p3))));

      if (clipping_required) {
        max = get_max_secondary(is_lowbd, tap, max, cdef_large_value_mask);

        min = v256_min_s16(min, tap[0]);
        min = v256_min_s16(min, tap[1]);
        min = v256_min_s16(min, tap[2]);
        min = v256_min_s16(min, tap[3]);
        min = v256_min_s16(min, tap[4]);
        min = v256_min_s16(min, tap[5]);
        min = v256_min_s16(min, tap[6]);
        min = v256_min_s16(min, tap[7]);
      }
    }

    // res = row + ((sum - (sum < 0) + 8) >> 4)
    sum = v256_add_16(sum, v256_cmplt_s16(sum, v256_zero()));
    res = v256_add_16(sum, v256_dup_16(8));
    res = v256_shr_n_s16(res, 4);
    res = v256_add_16(row, res);
    if (clipping_required) {
      res = v256_min_s16(v256_max_s16(res, min), max);
    }

    if (is_lowbd) {
      const v128 res_128 = v256_low_v128(v256_pack_s16_u8(res, res));
      u32_store_aligned(&dst8[(i + 0) * dstride],
                        v64_high_u32(v128_high_v64(res_128)));
      u32_store_aligned(&dst8[(i + 1) * dstride],
                        v64_low_u32(v128_high_v64(res_128)));
      u32_store_aligned(&dst8[(i + 2) * dstride],
                        v64_high_u32(v128_low_v64(res_128)));
      u32_store_aligned(&dst8[(i + 3) * dstride],
                        v64_low_u32(v128_low_v64(res_128)));
    } else {
      v64_store_aligned(&dst16[(i + 0) * dstride],
                        v128_high_v64(v256_high_v128(res)));
      v64_store_aligned(&dst16[(i + 1) * dstride],
                        v128_low_v64(v256_high_v128(res)));
      v64_store_aligned(&dst16[(i + 2) * dstride],
                        v128_high_v64(v256_low_v128(res)));
      v64_store_aligned(&dst16[(i + 3) * dstride],
                        v128_low_v64(v256_low_v128(res)));
    }
  }
}
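/* Per-pixel scalar model of filter_block_4x4() above (an illustrative sketch;
   p[k] are the tap pixels at the cdef_directions offsets and w[k] their
   cdef_pri_taps/cdef_sec_taps weights):

     sum = sum_k w[k] * constrain(p[k], x, strength, damping);
     y   = x + ((8 + sum - (sum < 0)) >> 4);  // the (sum < 0) term makes the
                                              // rounding symmetric for
                                              // negative sums
     if (clipping_required) y = clamp(y, min over {x, p[k]},
                                         max over {x, p[k]});
*/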
CDEF_INLINE void filter_block_8x8(const int is_lowbd, void *dest, int dstride,
                                  const uint16_t *in, int pri_strength,
                                  int sec_strength, int dir, int pri_damping,
                                  int sec_damping, int coeff_shift, int height,
                                  int enable_primary, int enable_secondary) {
  uint8_t *dst8 = (uint8_t *)dest;
  uint16_t *dst16 = (uint16_t *)dest;
  const int clipping_required = enable_primary && enable_secondary;
  int i;
  v256 sum, p0, p1, p2, p3, row, res;
  const v256 cdef_large_value_mask = v256_dup_16((uint16_t)~CDEF_VERY_LARGE);
  v256 max, min;
  const int po1 = cdef_directions[dir][0];
  const int po2 = cdef_directions[dir][1];
  const int s1o1 = cdef_directions[dir + 2][0];
  const int s1o2 = cdef_directions[dir + 2][1];
  const int s2o1 = cdef_directions[dir - 2][0];
  const int s2o2 = cdef_directions[dir - 2][1];
  const int *pri_taps = cdef_pri_taps[(pri_strength >> coeff_shift) & 1];
  const int *sec_taps = cdef_sec_taps;

  if (enable_primary && pri_strength)
    pri_damping = AOMMAX(0, pri_damping - get_msb(pri_strength));
  if (enable_secondary && sec_strength)
    sec_damping = AOMMAX(0, sec_damping - get_msb(sec_strength));

  for (i = 0; i < height; i += 2) {
    v256 tap[8];
    sum = v256_zero();
    row = v256_from_v128(v128_load_aligned(&in[i * CDEF_BSTRIDE]),
                         v128_load_aligned(&in[(i + 1) * CDEF_BSTRIDE]));

    min = max = row;
    if (enable_primary) {
      // Primary near taps
      tap[0] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE + po1]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + po1]));
      tap[1] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE - po1]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - po1]));
      p0 = constrain16(tap[0], row, pri_strength, pri_damping);
      p1 = constrain16(tap[1], row, pri_strength, pri_damping);

      // sum += pri_taps[0] * (p0 + p1)
      sum = v256_add_16(
          sum, v256_mullo_s16(v256_dup_16(pri_taps[0]), v256_add_16(p0, p1)));

      // Primary far taps
      tap[2] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE + po2]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + po2]));
      tap[3] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE - po2]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - po2]));
      p0 = constrain16(tap[2], row, pri_strength, pri_damping);
      p1 = constrain16(tap[3], row, pri_strength, pri_damping);

      // sum += pri_taps[1] * (p0 + p1)
      sum = v256_add_16(
          sum, v256_mullo_s16(v256_dup_16(pri_taps[1]), v256_add_16(p0, p1)));

      if (clipping_required) {
        max = get_max_primary(is_lowbd, tap, max, cdef_large_value_mask);

        min = v256_min_s16(min, tap[0]);
        min = v256_min_s16(min, tap[1]);
        min = v256_min_s16(min, tap[2]);
        min = v256_min_s16(min, tap[3]);
      }
      // End primary
    }

    if (enable_secondary) {
      // Secondary near taps
      tap[0] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE + s1o1]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + s1o1]));
      tap[1] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE - s1o1]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - s1o1]));
      tap[2] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE + s2o1]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + s2o1]));
      tap[3] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE - s2o1]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - s2o1]));
      p0 = constrain16(tap[0], row, sec_strength, sec_damping);
      p1 = constrain16(tap[1], row, sec_strength, sec_damping);
      p2 = constrain16(tap[2], row, sec_strength, sec_damping);
      p3 = constrain16(tap[3], row, sec_strength, sec_damping);

      // sum += sec_taps[0] * (p0 + p1 + p2 + p3)
      sum = v256_add_16(sum, v256_mullo_s16(v256_dup_16(sec_taps[0]),
                                            v256_add_16(v256_add_16(p0, p1),
                                                        v256_add_16(p2, p3))));

      // Secondary far taps
      tap[4] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE + s1o2]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + s1o2]));
      tap[5] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE - s1o2]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - s1o2]));
      tap[6] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE + s2o2]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE + s2o2]));
      tap[7] = v256_from_v128(
          v128_load_unaligned(&in[i * CDEF_BSTRIDE - s2o2]),
          v128_load_unaligned(&in[(i + 1) * CDEF_BSTRIDE - s2o2]));
      p0 = constrain16(tap[4], row, sec_strength, sec_damping);
      p1 = constrain16(tap[5], row, sec_strength, sec_damping);
      p2 = constrain16(tap[6], row, sec_strength, sec_damping);
      p3 = constrain16(tap[7], row, sec_strength, sec_damping);

      // sum += sec_taps[1] * (p0 + p1 + p2 + p3)
      sum = v256_add_16(sum, v256_mullo_s16(v256_dup_16(sec_taps[1]),
                                            v256_add_16(v256_add_16(p0, p1),
                                                        v256_add_16(p2, p3))));

      if (clipping_required) {
        max = get_max_secondary(is_lowbd, tap, max, cdef_large_value_mask);

        min = v256_min_s16(min, tap[0]);
        min = v256_min_s16(min, tap[1]);
        min = v256_min_s16(min, tap[2]);
        min = v256_min_s16(min, tap[3]);
        min = v256_min_s16(min, tap[4]);
        min = v256_min_s16(min, tap[5]);
        min = v256_min_s16(min, tap[6]);
        min = v256_min_s16(min, tap[7]);
      }
      // End secondary
    }

    // res = row + ((sum - (sum < 0) + 8) >> 4)
    sum = v256_add_16(sum, v256_cmplt_s16(sum, v256_zero()));
    res = v256_add_16(sum, v256_dup_16(8));
    res = v256_shr_n_s16(res, 4);
    res = v256_add_16(row, res);
    if (clipping_required) {
      res = v256_min_s16(v256_max_s16(res, min), max);
    }

    if (is_lowbd) {
      const v128 res_128 = v256_low_v128(v256_pack_s16_u8(res, res));
      v64_store_aligned(&dst8[i * dstride], v128_high_v64(res_128));
      v64_store_aligned(&dst8[(i + 1) * dstride], v128_low_v64(res_128));
    } else {
      v128_store_unaligned(&dst16[i * dstride], v256_high_v128(res));
      v128_store_unaligned(&dst16[(i + 1) * dstride], v256_low_v128(res));
    }
  }
}

#if defined(_MSC_VER) && !defined(__clang__)
#pragma optimize("", on)
#endif
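/* filter_block_8x8() above mirrors filter_block_4x4(): the 4x4 variant packs
   four 4-pixel rows into each v256 iteration, while the 8x8 variant packs two
   8-pixel rows, so both filter 16 pixels per pass. */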
SIMD_INLINE void copy_block_4xh(const int is_lowbd, void *dest, int dstride,
                                const uint16_t *in, int height) {
  uint8_t *dst8 = (uint8_t *)dest;
  uint16_t *dst16 = (uint16_t *)dest;
  int i;
  for (i = 0; i < height; i += 4) {
    const v128 row0 =
        v128_from_v64(v64_load_aligned(&in[(i + 0) * CDEF_BSTRIDE]),
                      v64_load_aligned(&in[(i + 1) * CDEF_BSTRIDE]));
    const v128 row1 =
        v128_from_v64(v64_load_aligned(&in[(i + 2) * CDEF_BSTRIDE]),
                      v64_load_aligned(&in[(i + 3) * CDEF_BSTRIDE]));
    if (is_lowbd) {
      /* Note: v128_pack_s16_u8(). The parameter order is swapped internally */
      const v128 res_128 = v128_pack_s16_u8(row1, row0);
      u32_store_aligned(&dst8[(i + 0) * dstride],
                        v64_high_u32(v128_low_v64(res_128)));
      u32_store_aligned(&dst8[(i + 1) * dstride],
                        v64_low_u32(v128_low_v64(res_128)));
      u32_store_aligned(&dst8[(i + 2) * dstride],
                        v64_high_u32(v128_high_v64(res_128)));
      u32_store_aligned(&dst8[(i + 3) * dstride],
                        v64_low_u32(v128_high_v64(res_128)));
    } else {
      v64_store_aligned(&dst16[(i + 0) * dstride], v128_high_v64(row0));
      v64_store_aligned(&dst16[(i + 1) * dstride], v128_low_v64(row0));
      v64_store_aligned(&dst16[(i + 2) * dstride], v128_high_v64(row1));
      v64_store_aligned(&dst16[(i + 3) * dstride], v128_low_v64(row1));
    }
  }
}

SIMD_INLINE void copy_block_8xh(const int is_lowbd, void *dest, int dstride,
                                const uint16_t *in, int height) {
  uint8_t *dst8 = (uint8_t *)dest;
  uint16_t *dst16 = (uint16_t *)dest;
  int i;
  for (i = 0; i < height; i += 2) {
    const v128 row0 = v128_load_aligned(&in[i * CDEF_BSTRIDE]);
    const v128 row1 = v128_load_aligned(&in[(i + 1) * CDEF_BSTRIDE]);
    if (is_lowbd) {
      /* Note: v128_pack_s16_u8(). The parameter order is swapped internally */
      const v128 res_128 = v128_pack_s16_u8(row1, row0);
      v64_store_aligned(&dst8[i * dstride], v128_low_v64(res_128));
      v64_store_aligned(&dst8[(i + 1) * dstride], v128_high_v64(res_128));
    } else {
      v128_store_unaligned(&dst16[i * dstride], row0);
      v128_store_unaligned(&dst16[(i + 1) * dstride], row1);
    }
  }
}
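/* Naming scheme for the dispatch functions below: the _8_/_16_ infix selects
   8-bit or 16-bit output (is_lowbd), and the numeric suffix selects the
   configuration: _0 = primary + secondary, _1 = primary only, _2 = secondary
   only, _3 = plain copy with no filtering. */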
void SIMD_FUNC(cdef_filter_8_0)(void *dest, int dstride, const uint16_t *in,
                                int pri_strength, int sec_strength, int dir,
                                int pri_damping, int sec_damping,
                                int coeff_shift, int block_width,
                                int block_height) {
  if (block_width == 8) {
    filter_block_8x8(/*is_lowbd=*/1, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/1,
                     /*enable_secondary=*/1);
  } else {
    filter_block_4x4(/*is_lowbd=*/1, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/1,
                     /*enable_secondary=*/1);
  }
}

void SIMD_FUNC(cdef_filter_8_1)(void *dest, int dstride, const uint16_t *in,
                                int pri_strength, int sec_strength, int dir,
                                int pri_damping, int sec_damping,
                                int coeff_shift, int block_width,
                                int block_height) {
  if (block_width == 8) {
    filter_block_8x8(/*is_lowbd=*/1, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/1,
                     /*enable_secondary=*/0);
  } else {
    filter_block_4x4(/*is_lowbd=*/1, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/1,
                     /*enable_secondary=*/0);
  }
}

void SIMD_FUNC(cdef_filter_8_2)(void *dest, int dstride, const uint16_t *in,
                                int pri_strength, int sec_strength, int dir,
                                int pri_damping, int sec_damping,
                                int coeff_shift, int block_width,
                                int block_height) {
  if (block_width == 8) {
    filter_block_8x8(/*is_lowbd=*/1, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/0,
                     /*enable_secondary=*/1);
  } else {
    filter_block_4x4(/*is_lowbd=*/1, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/0,
                     /*enable_secondary=*/1);
  }
}

void SIMD_FUNC(cdef_filter_8_3)(void *dest, int dstride, const uint16_t *in,
                                int pri_strength, int sec_strength, int dir,
                                int pri_damping, int sec_damping,
                                int coeff_shift, int block_width,
                                int block_height) {
  (void)pri_strength;
  (void)sec_strength;
  (void)dir;
  (void)pri_damping;
  (void)sec_damping;
  (void)coeff_shift;
  (void)block_width;

  if (block_width == 8) {
    copy_block_8xh(/*is_lowbd=*/1, dest, dstride, in, block_height);
  } else {
    copy_block_4xh(/*is_lowbd=*/1, dest, dstride, in, block_height);
  }
}
void SIMD_FUNC(cdef_filter_16_0)(void *dest, int dstride, const uint16_t *in,
                                 int pri_strength, int sec_strength, int dir,
                                 int pri_damping, int sec_damping,
                                 int coeff_shift, int block_width,
                                 int block_height) {
  if (block_width == 8) {
    filter_block_8x8(/*is_lowbd=*/0, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/1,
                     /*enable_secondary=*/1);
  } else {
    filter_block_4x4(/*is_lowbd=*/0, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/1,
                     /*enable_secondary=*/1);
  }
}

void SIMD_FUNC(cdef_filter_16_1)(void *dest, int dstride, const uint16_t *in,
                                 int pri_strength, int sec_strength, int dir,
                                 int pri_damping, int sec_damping,
                                 int coeff_shift, int block_width,
                                 int block_height) {
  if (block_width == 8) {
    filter_block_8x8(/*is_lowbd=*/0, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/1,
                     /*enable_secondary=*/0);
  } else {
    filter_block_4x4(/*is_lowbd=*/0, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/1,
                     /*enable_secondary=*/0);
  }
}

void SIMD_FUNC(cdef_filter_16_2)(void *dest, int dstride, const uint16_t *in,
                                 int pri_strength, int sec_strength, int dir,
                                 int pri_damping, int sec_damping,
                                 int coeff_shift, int block_width,
                                 int block_height) {
  if (block_width == 8) {
    filter_block_8x8(/*is_lowbd=*/0, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/0,
                     /*enable_secondary=*/1);
  } else {
    filter_block_4x4(/*is_lowbd=*/0, dest, dstride, in, pri_strength,
                     sec_strength, dir, pri_damping, sec_damping, coeff_shift,
                     block_height, /*enable_primary=*/0,
                     /*enable_secondary=*/1);
  }
}

void SIMD_FUNC(cdef_filter_16_3)(void *dest, int dstride, const uint16_t *in,
                                 int pri_strength, int sec_strength, int dir,
                                 int pri_damping, int sec_damping,
                                 int coeff_shift, int block_width,
                                 int block_height) {
  (void)pri_strength;
  (void)sec_strength;
  (void)dir;
  (void)pri_damping;
  (void)sec_damping;
  (void)coeff_shift;
  (void)block_width;
  if (block_width == 8) {
    copy_block_8xh(/*is_lowbd=*/0, dest, dstride, in, block_height);
  } else {
    copy_block_4xh(/*is_lowbd=*/0, dest, dstride, in, block_height);
  }
}
#if CONFIG_AV1_HIGHBITDEPTH
void SIMD_FUNC(cdef_copy_rect8_16bit_to_16bit)(uint16_t *dst, int dstride,
                                               const uint16_t *src,
                                               int sstride, int width,
                                               int height) {
  int i, j;
  for (i = 0; i < height; i++) {
    for (j = 0; j < (width & ~0x7); j += 8) {
      v128 row = v128_load_unaligned(&src[i * sstride + j]);
      v128_store_unaligned(&dst[i * dstride + j], row);
    }
    for (; j < width; j++) {
      dst[i * dstride + j] = src[i * sstride + j];
    }
  }
}
#endif  // CONFIG_AV1_HIGHBITDEPTH

#undef CDEF_INLINE

#endif  // AOM_AV1_COMMON_CDEF_BLOCK_SIMD_H_