vpx_convolve8_neon_i8mm.c
/*
 *  Copyright (c) 2023 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/arm/vpx_convolve8_neon.h"
#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"

DECLARE_ALIGNED(16, static const uint8_t, dot_prod_permute_tbl[48]) = {
  0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6,
  4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10,
  8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};

DECLARE_ALIGNED(16, static const uint8_t, dot_prod_merge_block_tbl[48]) = {
  // Shift left and insert new last column in transposed 4x4 block.
  1, 2, 3, 16, 5, 6, 7, 20, 9, 10, 11, 24, 13, 14, 15, 28,
  // Shift left and insert two new columns in transposed 4x4 block.
  2, 3, 16, 17, 6, 7, 20, 21, 10, 11, 24, 25, 14, 15, 28, 29,
  // Shift left and insert three new columns in transposed 4x4 block.
  3, 16, 17, 18, 7, 20, 21, 22, 11, 24, 25, 26, 15, 28, 29, 30
};

static INLINE int16x4_t convolve4_4_h(const uint8x16_t samples,
                                      const int8x8_t filters,
                                      const uint8x16_t permute_tbl) {
  // Permute samples ready for dot product.
  // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
  uint8x16_t permuted_samples = vqtbl1q_u8(samples, permute_tbl);

  int32x4_t sum =
      vusdotq_lane_s32(vdupq_n_s32(0), permuted_samples, filters, 0);

  // Further narrowing and packing is performed by the caller.
  return vmovn_s32(sum);
}

static INLINE uint8x8_t convolve4_8_h(const uint8x16_t samples,
                                      const int8x8_t filters,
                                      const uint8x16x2_t permute_tbl) {
  // Permute samples ready for dot product.
  // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
  // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 }
  uint8x16_t permuted_samples[2] = { vqtbl1q_u8(samples, permute_tbl.val[0]),
                                     vqtbl1q_u8(samples, permute_tbl.val[1]) };

  // First 4 output values.
  int32x4_t sum0 =
      vusdotq_lane_s32(vdupq_n_s32(0), permuted_samples[0], filters, 0);
  // Second 4 output values.
  int32x4_t sum1 =
      vusdotq_lane_s32(vdupq_n_s32(0), permuted_samples[1], filters, 0);

  // Narrow and re-pack.
  int16x8_t sum = vcombine_s16(vmovn_s32(sum0), vmovn_s32(sum1));
  // We halved the filter values so -1 from right shift.
  return vqrshrun_n_s16(sum, FILTER_BITS - 1);
}

static INLINE int16x4_t convolve8_4_h(const uint8x16_t samples,
                                      const int8x8_t filters,
                                      const uint8x16x2_t permute_tbl) {
  // Permute samples ready for dot product.
  // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
  // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 }
  uint8x16_t permuted_samples[2] = { vqtbl1q_u8(samples, permute_tbl.val[0]),
                                     vqtbl1q_u8(samples, permute_tbl.val[1]) };

  int32x4_t sum =
      vusdotq_lane_s32(vdupq_n_s32(0), permuted_samples[0], filters, 0);
  sum = vusdotq_lane_s32(sum, permuted_samples[1], filters, 1);

  // Further narrowing and packing is performed by the caller.
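  // Note (explanatory, not from the original source): unlike the 4-tap path,
  // 8-tap filter values are not halved, so the total FILTER_BITS shift is
  // split as 1 bit here plus FILTER_BITS - 1 in the caller; the pre-shift
  // also keeps the 32-bit sums within int16_t range when narrowing.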
  return vshrn_n_s32(sum, 1);
}

static INLINE uint8x8_t convolve8_8_h(const uint8x16_t samples,
                                      const int8x8_t filters,
                                      const uint8x16x3_t permute_tbl) {
  // Permute samples ready for dot product.
  // { 0, 1, 2, 3, 1, 2, 3, 4, 2, 3, 4, 5, 3, 4, 5, 6 }
  // { 4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10 }
  // { 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }
  uint8x16_t permuted_samples[3] = { vqtbl1q_u8(samples, permute_tbl.val[0]),
                                     vqtbl1q_u8(samples, permute_tbl.val[1]),
                                     vqtbl1q_u8(samples, permute_tbl.val[2]) };

  // First 4 output values.
  int32x4_t sum0 =
      vusdotq_lane_s32(vdupq_n_s32(0), permuted_samples[0], filters, 0);
  sum0 = vusdotq_lane_s32(sum0, permuted_samples[1], filters, 1);
  // Second 4 output values.
  int32x4_t sum1 =
      vusdotq_lane_s32(vdupq_n_s32(0), permuted_samples[1], filters, 0);
  sum1 = vusdotq_lane_s32(sum1, permuted_samples[2], filters, 1);

  // Narrow and re-pack.
  int16x8_t sum = vcombine_s16(vshrn_n_s32(sum0, 1), vshrn_n_s32(sum1, 1));
  return vqrshrun_n_s16(sum, FILTER_BITS - 1);
}

static INLINE void convolve_4tap_horiz_neon_i8mm(const uint8_t *src,
                                                 ptrdiff_t src_stride,
                                                 uint8_t *dst,
                                                 ptrdiff_t dst_stride, int w,
                                                 int h, const int8x8_t filter) {
  if (w == 4) {
    const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);

    do {
      uint8x16_t s0, s1, s2, s3;
      load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);

      int16x4_t t0 = convolve4_4_h(s0, filter, permute_tbl);
      int16x4_t t1 = convolve4_4_h(s1, filter, permute_tbl);
      int16x4_t t2 = convolve4_4_h(s2, filter, permute_tbl);
      int16x4_t t3 = convolve4_4_h(s3, filter, permute_tbl);
      // We halved the filter values so -1 from right shift.
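      // vqrshrun_n_s16 rounds, shifts and saturates the sums to the unsigned
      // 8-bit pixel range in a single instruction.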
      uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(t0, t1), FILTER_BITS - 1);
      uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(t2, t3), FILTER_BITS - 1);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
    const uint8x16x2_t permute_tbl = vld1q_u8_x2(dot_prod_permute_tbl);

    do {
      const uint8_t *s = src;
      uint8_t *d = dst;
      int width = w;

      do {
        uint8x16_t s0, s1, s2, s3;
        load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);

        uint8x8_t d0 = convolve4_8_h(s0, filter, permute_tbl);
        uint8x8_t d1 = convolve4_8_h(s1, filter, permute_tbl);
        uint8x8_t d2 = convolve4_8_h(s2, filter, permute_tbl);
        uint8x8_t d3 = convolve4_8_h(s3, filter, permute_tbl);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s += 8;
        d += 8;
        width -= 8;
      } while (width != 0);
      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  }
}

static INLINE void convolve_8tap_horiz_neon_i8mm(const uint8_t *src,
                                                 ptrdiff_t src_stride,
                                                 uint8_t *dst,
                                                 ptrdiff_t dst_stride, int w,
                                                 int h, const int8x8_t filter) {
  if (w == 4) {
    const uint8x16x2_t permute_tbl = vld1q_u8_x2(dot_prod_permute_tbl);

    do {
      uint8x16_t s0, s1, s2, s3;
      load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);

      int16x4_t t0 = convolve8_4_h(s0, filter, permute_tbl);
      int16x4_t t1 = convolve8_4_h(s1, filter, permute_tbl);
      int16x4_t t2 = convolve8_4_h(s2, filter, permute_tbl);
      int16x4_t t3 = convolve8_4_h(s3, filter, permute_tbl);
      uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(t0, t1), FILTER_BITS - 1);
      uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(t2, t3), FILTER_BITS - 1);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
    const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);

    do {
      const uint8_t *s = src;
      uint8_t *d = dst;
      int width = w;

      do {
        uint8x16_t s0, s1, s2, s3;
        load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);

        uint8x8_t d0 = convolve8_8_h(s0, filter, permute_tbl);
        uint8x8_t d1 = convolve8_8_h(s1, filter, permute_tbl);
        uint8x8_t d2 = convolve8_8_h(s2, filter, permute_tbl);
        uint8x8_t d3 = convolve8_8_h(s3, filter, permute_tbl);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s += 8;
        d += 8;
        width -= 8;
      } while (width != 0);
      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  }
}

void vpx_convolve8_horiz_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const InterpKernel *filter, int x0_q4,
                                   int x_step_q4, int y0_q4, int y_step_q4,
                                   int w, int h) {
  assert((intptr_t)dst % 4 == 0);
  assert(dst_stride % 4 == 0);
  assert(x_step_q4 == 16);

  (void)x_step_q4;
  (void)y0_q4;
  (void)y_step_q4;

  if (vpx_get_filter_taps(filter[x0_q4]) <= 4) {
    // Load 4-tap filter into first 4 elements of the vector.
    // All 4-tap and bilinear filter values are even, so halve them to reduce
    // intermediate precision requirements.
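    // (An InterpKernel always has 8 elements; a 4-tap kernel occupies the
    // middle four, hence the +2 offset below.)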
    const int16x4_t x_filter = vld1_s16(filter[x0_q4] + 2);
    const int8x8_t x_filter_4tap =
        vshrn_n_s16(vcombine_s16(x_filter, vdup_n_s16(0)), 1);

    convolve_4tap_horiz_neon_i8mm(src - 1, src_stride, dst, dst_stride, w, h,
                                  x_filter_4tap);

  } else {
    const int8x8_t x_filter_8tap = vmovn_s16(vld1q_s16(filter[x0_q4]));

    convolve_8tap_horiz_neon_i8mm(src - 3, src_stride, dst, dst_stride, w, h,
                                  x_filter_8tap);
  }
}

void vpx_convolve8_avg_horiz_neon_i8mm(const uint8_t *src,
                                       ptrdiff_t src_stride, uint8_t *dst,
                                       ptrdiff_t dst_stride,
                                       const InterpKernel *filter, int x0_q4,
                                       int x_step_q4, int y0_q4, int y_step_q4,
                                       int w, int h) {
  const int8x8_t filters = vmovn_s16(vld1q_s16(filter[x0_q4]));

  assert((intptr_t)dst % 4 == 0);
  assert(dst_stride % 4 == 0);
  assert(x_step_q4 == 16);

  (void)x_step_q4;
  (void)y0_q4;
  (void)y_step_q4;

  src -= 3;

  if (w == 4) {
    const uint8x16x2_t permute_tbl = vld1q_u8_x2(dot_prod_permute_tbl);

    do {
      uint8x16_t s0, s1, s2, s3;
      load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);

      int16x4_t t0 = convolve8_4_h(s0, filters, permute_tbl);
      int16x4_t t1 = convolve8_4_h(s1, filters, permute_tbl);
      int16x4_t t2 = convolve8_4_h(s2, filters, permute_tbl);
      int16x4_t t3 = convolve8_4_h(s3, filters, permute_tbl);
      uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(t0, t1), FILTER_BITS - 1);
      uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(t2, t3), FILTER_BITS - 1);

      uint8x8_t dd01 = load_u8(dst + 0 * dst_stride, dst_stride);
      uint8x8_t dd23 = load_u8(dst + 2 * dst_stride, dst_stride);

      d01 = vrhadd_u8(d01, dd01);
      d23 = vrhadd_u8(d23, dd23);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
    const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);

    do {
      const uint8_t *s = src;
      uint8_t *d = dst;
      int width = w;

      do {
        uint8x16_t s0, s1, s2, s3;
        load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);

        uint8x8_t d0 = convolve8_8_h(s0, filters, permute_tbl);
        uint8x8_t d1 = convolve8_8_h(s1, filters, permute_tbl);
        uint8x8_t d2 = convolve8_8_h(s2, filters, permute_tbl);
        uint8x8_t d3 = convolve8_8_h(s3, filters, permute_tbl);

        uint8x8_t dd0, dd1, dd2, dd3;
        load_u8_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);

        d0 = vrhadd_u8(d0, dd0);
        d1 = vrhadd_u8(d1, dd1);
        d2 = vrhadd_u8(d2, dd2);
        d3 = vrhadd_u8(d3, dd3);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s += 8;
        d += 8;
        width -= 8;
      } while (width != 0);
      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  }
}

static INLINE int16x4_t convolve8_4_v(const uint8x16_t samples_lo,
                                      const uint8x16_t samples_hi,
                                      const int8x8_t filters) {
  // Sample permutation is performed by the caller.
  int32x4_t sum = vusdotq_lane_s32(vdupq_n_s32(0), samples_lo, filters, 0);
  sum = vusdotq_lane_s32(sum, samples_hi, filters, 1);

  // Further narrowing and packing is performed by the caller.
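  // As in convolve8_4_h(): pre-shift by 1 so the sums fit in int16_t; the
  // caller applies the remaining FILTER_BITS - 1 shift.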
  return vshrn_n_s32(sum, 1);
}

static INLINE uint8x8_t convolve8_8_v(const uint8x16_t samples0_lo,
                                      const uint8x16_t samples0_hi,
                                      const uint8x16_t samples1_lo,
                                      const uint8x16_t samples1_hi,
                                      const int8x8_t filters) {
  // Sample permutation is performed by the caller.

  // First 4 output values.
  int32x4_t sum0 = vusdotq_lane_s32(vdupq_n_s32(0), samples0_lo, filters, 0);
  sum0 = vusdotq_lane_s32(sum0, samples0_hi, filters, 1);
  // Second 4 output values.
  int32x4_t sum1 = vusdotq_lane_s32(vdupq_n_s32(0), samples1_lo, filters, 0);
  sum1 = vusdotq_lane_s32(sum1, samples1_hi, filters, 1);

  // Narrow and re-pack.
  int16x8_t sum = vcombine_s16(vshrn_n_s32(sum0, 1), vshrn_n_s32(sum1, 1));
  return vqrshrun_n_s16(sum, FILTER_BITS - 1);
}

static INLINE void convolve_8tap_vert_neon_i8mm(const uint8_t *src,
                                                ptrdiff_t src_stride,
                                                uint8_t *dst,
                                                ptrdiff_t dst_stride, int w,
                                                int h, const int8x8_t filter) {
  const uint8x16x3_t merge_block_tbl = vld1q_u8_x3(dot_prod_merge_block_tbl);
  if (w == 4) {
    uint8x8_t s0, s1, s2, s3, s4, s5, s6;
    load_u8_8x7(src, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
    src += 7 * src_stride;

    // This operation combines a conventional transpose and the sample permute
    // (see horizontal case) required before computing the dot product.
    uint8x16_t s0123, s1234, s2345, s3456;
    transpose_concat_u8_4x4(s0, s1, s2, s3, &s0123);
    transpose_concat_u8_4x4(s1, s2, s3, s4, &s1234);
    transpose_concat_u8_4x4(s2, s3, s4, s5, &s2345);
    transpose_concat_u8_4x4(s3, s4, s5, s6, &s3456);

    do {
      uint8x8_t s7, s8, s9, s10;
      load_u8_8x4(src, src_stride, &s7, &s8, &s9, &s10);

      uint8x16_t s78910;
      transpose_concat_u8_4x4(s7, s8, s9, s10, &s78910);

      // Merge new data into block from previous iteration.
      uint8x16x2_t samples_LUT = { { s3456, s78910 } };
      uint8x16_t s4567 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
      uint8x16_t s5678 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
      uint8x16_t s6789 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

      int16x4_t d0 = convolve8_4_v(s0123, s4567, filter);
      int16x4_t d1 = convolve8_4_v(s1234, s5678, filter);
      int16x4_t d2 = convolve8_4_v(s2345, s6789, filter);
      int16x4_t d3 = convolve8_4_v(s3456, s78910, filter);
      uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS - 1);
      uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS - 1);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      // Prepare block for next iteration - re-using as much as possible.
      // Shuffle everything up four rows.
      s0123 = s4567;
      s1234 = s5678;
      s2345 = s6789;
      s3456 = s78910;

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
    do {
      const uint8_t *s = src;
      uint8_t *d = dst;
      int height = h;

      uint8x8_t s0, s1, s2, s3, s4, s5, s6;
      load_u8_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
      s += 7 * src_stride;

      // This operation combines a conventional transpose and the sample permute
      // (see horizontal case) required before computing the dot product.
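      // For 8-wide columns the transposed 4-row block no longer fits in a
      // single vector, so it is split into _lo (columns 0-3) and _hi
      // (columns 4-7) halves.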
      uint8x16_t s0123_lo, s0123_hi, s1234_lo, s1234_hi, s2345_lo, s2345_hi,
          s3456_lo, s3456_hi;
      transpose_concat_u8_8x4(s0, s1, s2, s3, &s0123_lo, &s0123_hi);
      transpose_concat_u8_8x4(s1, s2, s3, s4, &s1234_lo, &s1234_hi);
      transpose_concat_u8_8x4(s2, s3, s4, s5, &s2345_lo, &s2345_hi);
      transpose_concat_u8_8x4(s3, s4, s5, s6, &s3456_lo, &s3456_hi);

      do {
        uint8x8_t s7, s8, s9, s10;
        load_u8_8x4(s, src_stride, &s7, &s8, &s9, &s10);

        uint8x16_t s78910_lo, s78910_hi;
        transpose_concat_u8_8x4(s7, s8, s9, s10, &s78910_lo, &s78910_hi);

        // Merge new data into block from previous iteration.
        uint8x16x2_t samples_LUT = { { s3456_lo, s78910_lo } };
        uint8x16_t s4567_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
        uint8x16_t s5678_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
        uint8x16_t s6789_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

        samples_LUT.val[0] = s3456_hi;
        samples_LUT.val[1] = s78910_hi;
        uint8x16_t s4567_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
        uint8x16_t s5678_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
        uint8x16_t s6789_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

        uint8x8_t d0 =
            convolve8_8_v(s0123_lo, s4567_lo, s0123_hi, s4567_hi, filter);
        uint8x8_t d1 =
            convolve8_8_v(s1234_lo, s5678_lo, s1234_hi, s5678_hi, filter);
        uint8x8_t d2 =
            convolve8_8_v(s2345_lo, s6789_lo, s2345_hi, s6789_hi, filter);
        uint8x8_t d3 =
            convolve8_8_v(s3456_lo, s78910_lo, s3456_hi, s78910_hi, filter);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        // Prepare block for next iteration - re-using as much as possible.
        // Shuffle everything up four rows.
        s0123_lo = s4567_lo;
        s0123_hi = s4567_hi;
        s1234_lo = s5678_lo;
        s1234_hi = s5678_hi;
        s2345_lo = s6789_lo;
        s2345_hi = s6789_hi;
        s3456_lo = s78910_lo;
        s3456_hi = s78910_hi;

        s += 4 * src_stride;
        d += 4 * dst_stride;
        height -= 4;
      } while (height != 0);
      src += 8;
      dst += 8;
      w -= 8;
    } while (w != 0);
  }
}

void vpx_convolve8_vert_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const InterpKernel *filter, int x0_q4,
                                  int x_step_q4, int y0_q4, int y_step_q4,
                                  int w, int h) {
  assert((intptr_t)dst % 4 == 0);
  assert(dst_stride % 4 == 0);
  assert(y_step_q4 == 16);

  (void)x0_q4;
  (void)x_step_q4;
  (void)y_step_q4;

  if (vpx_get_filter_taps(filter[y0_q4]) <= 4) {
    const int16x8_t y_filter = vld1q_s16(filter[y0_q4]);

    convolve_4tap_vert_neon(src - src_stride, src_stride, dst, dst_stride, w,
                            h, y_filter);
  } else {
    const int8x8_t y_filter = vmovn_s16(vld1q_s16(filter[y0_q4]));

    convolve_8tap_vert_neon_i8mm(src - 3 * src_stride, src_stride, dst,
                                 dst_stride, w, h, y_filter);
  }
}

void vpx_convolve8_avg_vert_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
                                      uint8_t *dst, ptrdiff_t dst_stride,
                                      const InterpKernel *filter, int x0_q4,
                                      int x_step_q4, int y0_q4, int y_step_q4,
                                      int w, int h) {
  const int8x8_t filters = vmovn_s16(vld1q_s16(filter[y0_q4]));
  const uint8x16x3_t merge_block_tbl = vld1q_u8_x3(dot_prod_merge_block_tbl);

  assert((intptr_t)dst % 4 == 0);
  assert(dst_stride % 4 == 0);
  assert(y_step_q4 == 16);

  (void)x0_q4;
  (void)x_step_q4;
  (void)y_step_q4;

  src -= 3 * src_stride;

  if (w == 4) {
    uint8x8_t s0, s1, s2, s3, s4, s5, s6;
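    // Load the seven rows needed to prime the 8-tap filter; each iteration
    // of the loop below loads four more.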
    load_u8_8x7(src, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
    src += 7 * src_stride;

    // This operation combines a conventional transpose and the sample permute
    // (see horizontal case) required before computing the dot product.
    uint8x16_t s0123, s1234, s2345, s3456;
    transpose_concat_u8_4x4(s0, s1, s2, s3, &s0123);
    transpose_concat_u8_4x4(s1, s2, s3, s4, &s1234);
    transpose_concat_u8_4x4(s2, s3, s4, s5, &s2345);
    transpose_concat_u8_4x4(s3, s4, s5, s6, &s3456);

    do {
      uint8x8_t s7, s8, s9, s10;
      load_u8_8x4(src, src_stride, &s7, &s8, &s9, &s10);

      uint8x16_t s78910;
      transpose_concat_u8_4x4(s7, s8, s9, s10, &s78910);

      // Merge new data into block from previous iteration.
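      // vqtbl2q_u8 treats the vector pair as a 32-byte table: indices 0-15
      // select bytes from the previous block, 16-31 from the new one (see
      // dot_prod_merge_block_tbl above).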
      uint8x16x2_t samples_LUT = { { s3456, s78910 } };
      uint8x16_t s4567 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
      uint8x16_t s5678 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
      uint8x16_t s6789 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

      int16x4_t d0 = convolve8_4_v(s0123, s4567, filters);
      int16x4_t d1 = convolve8_4_v(s1234, s5678, filters);
      int16x4_t d2 = convolve8_4_v(s2345, s6789, filters);
      int16x4_t d3 = convolve8_4_v(s3456, s78910, filters);
      uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS - 1);
      uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS - 1);

      uint8x8_t dd01 = load_u8(dst + 0 * dst_stride, dst_stride);
      uint8x8_t dd23 = load_u8(dst + 2 * dst_stride, dst_stride);

      d01 = vrhadd_u8(d01, dd01);
      d23 = vrhadd_u8(d23, dd23);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      // Prepare block for next iteration - re-using as much as possible.
      // Shuffle everything up four rows.
      s0123 = s4567;
      s1234 = s5678;
      s2345 = s6789;
      s3456 = s78910;

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
    do {
      const uint8_t *s = src;
      uint8_t *d = dst;
      int height = h;

      uint8x8_t s0, s1, s2, s3, s4, s5, s6;
      load_u8_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
      s += 7 * src_stride;

      // This operation combines a conventional transpose and the sample permute
      // (see horizontal case) required before computing the dot product.
      uint8x16_t s0123_lo, s0123_hi, s1234_lo, s1234_hi, s2345_lo, s2345_hi,
          s3456_lo, s3456_hi;
      transpose_concat_u8_8x4(s0, s1, s2, s3, &s0123_lo, &s0123_hi);
      transpose_concat_u8_8x4(s1, s2, s3, s4, &s1234_lo, &s1234_hi);
      transpose_concat_u8_8x4(s2, s3, s4, s5, &s2345_lo, &s2345_hi);
      transpose_concat_u8_8x4(s3, s4, s5, s6, &s3456_lo, &s3456_hi);

      do {
        uint8x8_t s7, s8, s9, s10;
        load_u8_8x4(s, src_stride, &s7, &s8, &s9, &s10);

        uint8x16_t s78910_lo, s78910_hi;
        transpose_concat_u8_8x4(s7, s8, s9, s10, &s78910_lo, &s78910_hi);

        // Merge new data into block from previous iteration.
        uint8x16x2_t samples_LUT = { { s3456_lo, s78910_lo } };
        uint8x16_t s4567_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
        uint8x16_t s5678_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
        uint8x16_t s6789_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

        samples_LUT.val[0] = s3456_hi;
        samples_LUT.val[1] = s78910_hi;
        uint8x16_t s4567_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
        uint8x16_t s5678_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
        uint8x16_t s6789_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

        uint8x8_t d0 =
            convolve8_8_v(s0123_lo, s4567_lo, s0123_hi, s4567_hi, filters);
        uint8x8_t d1 =
            convolve8_8_v(s1234_lo, s5678_lo, s1234_hi, s5678_hi, filters);
        uint8x8_t d2 =
            convolve8_8_v(s2345_lo, s6789_lo, s2345_hi, s6789_hi, filters);
        uint8x8_t d3 =
            convolve8_8_v(s3456_lo, s78910_lo, s3456_hi, s78910_hi, filters);

        uint8x8_t dd0, dd1, dd2, dd3;
        load_u8_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);

        d0 = vrhadd_u8(d0, dd0);
        d1 = vrhadd_u8(d1, dd1);
        d2 = vrhadd_u8(d2, dd2);
        d3 = vrhadd_u8(d3, dd3);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        // Prepare block for next iteration - re-using as much as possible.
        // Shuffle everything up four rows.
        s0123_lo = s4567_lo;
        s0123_hi = s4567_hi;
        s1234_lo = s5678_lo;
        s1234_hi = s5678_hi;
        s2345_lo = s6789_lo;
        s2345_hi = s6789_hi;
        s3456_lo = s78910_lo;
        s3456_hi = s78910_hi;

        s += 4 * src_stride;
        d += 4 * dst_stride;
        height -= 4;
      } while (height != 0);
      src += 8;
      dst += 8;
      w -= 8;
    } while (w != 0);
  }
}

static INLINE void convolve_4tap_2d_neon_i8mm(const uint8_t *src,
                                              ptrdiff_t src_stride,
                                              uint8_t *dst,
                                              ptrdiff_t dst_stride, int w,
                                              int h, const int8x8_t x_filter,
                                              const uint8x8_t y_filter) {
  // Neon does not have lane-referencing multiply or multiply-accumulate
  // instructions that operate on vectors of 8-bit elements. This means we have
  // to duplicate filter taps into a whole vector and use standard multiply /
  // multiply-accumulate instructions.
  const uint8x8_t y_filter_taps[4] = { vdup_lane_u8(y_filter, 2),
                                       vdup_lane_u8(y_filter, 3),
                                       vdup_lane_u8(y_filter, 4),
                                       vdup_lane_u8(y_filter, 5) };

  if (w == 4) {
    const uint8x16_t permute_tbl = vld1q_u8(dot_prod_permute_tbl);

    uint8x16_t h_s0, h_s1, h_s2;
    load_u8_16x3(src, src_stride, &h_s0, &h_s1, &h_s2);

    int16x4_t t0 = convolve4_4_h(h_s0, x_filter, permute_tbl);
    int16x4_t t1 = convolve4_4_h(h_s1, x_filter, permute_tbl);
    int16x4_t t2 = convolve4_4_h(h_s2, x_filter, permute_tbl);
    // We halved the filter values so -1 from right shift.
    uint8x8_t v_s01 = vqrshrun_n_s16(vcombine_s16(t0, t1), FILTER_BITS - 1);
    uint8x8_t v_s12 = vqrshrun_n_s16(vcombine_s16(t1, t2), FILTER_BITS - 1);

    src += 3 * src_stride;

    do {
      uint8x16_t h_s3, h_s4, h_s5, h_s6;
      load_u8_16x4(src, src_stride, &h_s3, &h_s4, &h_s5, &h_s6);

      int16x4_t t3 = convolve4_4_h(h_s3, x_filter, permute_tbl);
      int16x4_t t4 = convolve4_4_h(h_s4, x_filter, permute_tbl);
      int16x4_t t5 = convolve4_4_h(h_s5, x_filter, permute_tbl);
      int16x4_t t6 = convolve4_4_h(h_s6, x_filter, permute_tbl);
      // We halved the filter values so -1 from right shift.
      uint8x8_t v_s34 = vqrshrun_n_s16(vcombine_s16(t3, t4), FILTER_BITS - 1);
      uint8x8_t v_s56 = vqrshrun_n_s16(vcombine_s16(t5, t6), FILTER_BITS - 1);
      uint8x8_t v_s23 = vext_u8(v_s12, v_s34, 4);
      uint8x8_t v_s45 = vext_u8(v_s34, v_s56, 4);

      uint8x8_t d01 = convolve4_8(v_s01, v_s12, v_s23, v_s34, y_filter_taps);
      uint8x8_t d23 = convolve4_8(v_s23, v_s34, v_s45, v_s56, y_filter_taps);

      store_unaligned_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_unaligned_u8(dst + 2 * dst_stride, dst_stride, d23);

      v_s01 = v_s45;
      v_s12 = v_s56;
      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
    const uint8x16x2_t permute_tbl = vld1q_u8_x2(dot_prod_permute_tbl);

    do {
      const uint8_t *s = src;
      uint8_t *d = dst;
      int height = h;

      uint8x16_t h_s0, h_s1, h_s2;
      load_u8_16x3(s, src_stride, &h_s0, &h_s1, &h_s2);

      uint8x8_t v_s0 = convolve4_8_h(h_s0, x_filter, permute_tbl);
      uint8x8_t v_s1 = convolve4_8_h(h_s1, x_filter, permute_tbl);
      uint8x8_t v_s2 = convolve4_8_h(h_s2, x_filter, permute_tbl);

      s += 3 * src_stride;

      do {
        uint8x16_t h_s3, h_s4, h_s5, h_s6;
        load_u8_16x4(s, src_stride, &h_s3, &h_s4, &h_s5, &h_s6);

        uint8x8_t v_s3 = convolve4_8_h(h_s3, x_filter, permute_tbl);
        uint8x8_t v_s4 = convolve4_8_h(h_s4, x_filter, permute_tbl);
        uint8x8_t v_s5 = convolve4_8_h(h_s5, x_filter, permute_tbl);
        uint8x8_t v_s6 = convolve4_8_h(h_s6, x_filter, permute_tbl);

        uint8x8_t d0 = convolve4_8(v_s0, v_s1, v_s2, v_s3, y_filter_taps);
        uint8x8_t d1 = convolve4_8(v_s1, v_s2, v_s3, v_s4, y_filter_taps);
        uint8x8_t d2 = convolve4_8(v_s2, v_s3, v_s4, v_s5, y_filter_taps);
        uint8x8_t d3 = convolve4_8(v_s3, v_s4, v_s5, v_s6, y_filter_taps);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        v_s0 = v_s4;
        v_s1 = v_s5;
        v_s2 = v_s6;
        s += 4 * src_stride;
        d += 4 * dst_stride;
        height -= 4;
      } while (height != 0);
      src += 8;
      dst += 8;
      w -= 8;
    } while (w != 0);
  }
}

static INLINE void convolve_8tap_2d_horiz_neon_i8mm(
    const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
    ptrdiff_t dst_stride, int w, int h, const int8x8_t filter) {
  if (w == 4) {
    const uint8x16x2_t permute_tbl = vld1q_u8_x2(dot_prod_permute_tbl);

    do {
      uint8x16_t s0, s1, s2, s3;
      load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);

      int16x4_t d0 = convolve8_4_h(s0, filter, permute_tbl);
      int16x4_t d1 = convolve8_4_h(s1, filter, permute_tbl);
      int16x4_t d2 = convolve8_4_h(s2, filter, permute_tbl);
      int16x4_t d3 = convolve8_4_h(s3, filter, permute_tbl);
      uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS - 1);
      uint8x8_t d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS - 1);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h > 3);

    // Process final three rows (h % 4 == 3). See vpx_convolve8_neon_i8mm()
    // below for further details on possible values of block height.
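    // (The intermediate block is h + SUBPEL_TAPS - 1 = h + 7 rows tall and h
    // is a multiple of 4, so three rows always remain after the loop above.)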
    uint8x16_t s0, s1, s2;
    load_u8_16x3(src, src_stride, &s0, &s1, &s2);

    int16x4_t d0 = convolve8_4_h(s0, filter, permute_tbl);
    int16x4_t d1 = convolve8_4_h(s1, filter, permute_tbl);
    int16x4_t d2 = convolve8_4_h(s2, filter, permute_tbl);
    uint8x8_t d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS - 1);
    uint8x8_t d23 =
        vqrshrun_n_s16(vcombine_s16(d2, vdup_n_s16(0)), FILTER_BITS - 1);

    store_u8(dst + 0 * dst_stride, dst_stride, d01);
    store_u8_4x1(dst + 2 * dst_stride, d23);
  } else {
    const uint8x16x3_t permute_tbl = vld1q_u8_x3(dot_prod_permute_tbl);

    do {
      const uint8_t *s = src;
      uint8_t *d = dst;
      int width = w;

      do {
        uint8x16_t s0, s1, s2, s3;
        load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);

        uint8x8_t d0 = convolve8_8_h(s0, filter, permute_tbl);
        uint8x8_t d1 = convolve8_8_h(s1, filter, permute_tbl);
        uint8x8_t d2 = convolve8_8_h(s2, filter, permute_tbl);
        uint8x8_t d3 = convolve8_8_h(s3, filter, permute_tbl);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s += 8;
        d += 8;
        width -= 8;
      } while (width > 0);
      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h > 3);

    // Process final three rows (h % 4 == 3). See vpx_convolve8_neon_i8mm()
    // below for further details on possible values of block height.
    const uint8_t *s = src;
    uint8_t *d = dst;
    int width = w;

    do {
      uint8x16_t s0, s1, s2;
      load_u8_16x3(s, src_stride, &s0, &s1, &s2);

      uint8x8_t d0 = convolve8_8_h(s0, filter, permute_tbl);
      uint8x8_t d1 = convolve8_8_h(s1, filter, permute_tbl);
      uint8x8_t d2 = convolve8_8_h(s2, filter, permute_tbl);

      store_u8_8x3(d, dst_stride, d0, d1, d2);

      s += 8;
      d += 8;
      width -= 8;
    } while (width > 0);
  }
}

void vpx_convolve8_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const InterpKernel *filter, int x0_q4,
                             int x_step_q4, int y0_q4, int y_step_q4, int w,
                             int h) {
  assert(x_step_q4 == 16);
  assert(y_step_q4 == 16);

  (void)x_step_q4;
  (void)y_step_q4;

  const int x_filter_taps = vpx_get_filter_taps(filter[x0_q4]) <= 4 ? 4 : 8;
  const int y_filter_taps = vpx_get_filter_taps(filter[y0_q4]) <= 4 ? 4 : 8;
  // Account for needing filter_taps / 2 - 1 lines prior and filter_taps / 2
  // lines post both horizontally and vertically.
  const ptrdiff_t horiz_offset = x_filter_taps / 2 - 1;
  const ptrdiff_t vert_offset = (y_filter_taps / 2 - 1) * src_stride;

  if (x_filter_taps == 4 && y_filter_taps == 4) {
    const int16x4_t x_filter = vld1_s16(filter[x0_q4] + 2);
    const int16x8_t y_filter = vld1q_s16(filter[y0_q4]);

    // 4-tap and bilinear filter values are even, so halve them to reduce
    // intermediate precision requirements.
    const int8x8_t x_filter_4tap =
        vshrn_n_s16(vcombine_s16(x_filter, vdup_n_s16(0)), 1);
    const uint8x8_t y_filter_4tap =
        vshrn_n_u16(vreinterpretq_u16_s16(vabsq_s16(y_filter)), 1);

    convolve_4tap_2d_neon_i8mm(src - horiz_offset - vert_offset, src_stride,
                               dst, dst_stride, w, h, x_filter_4tap,
                               y_filter_4tap);
    return;
  }

  // Given our constraints: w <= 64, h <= 64, taps <= 8, we can reduce the
  // maximum buffer size to 64 * (64 + 7).
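  // The horizontal pass below writes w <= 64 bytes per row into im_block,
  // one row per intermediate line, im_height = h + 7 <= 71 rows in total.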
  DECLARE_ALIGNED(32, uint8_t, im_block[64 * 71]);
  const int im_stride = 64;
  const int im_height = h + SUBPEL_TAPS - 1;

  const int8x8_t x_filter_8tap = vmovn_s16(vld1q_s16(filter[x0_q4]));
  const int8x8_t y_filter_8tap = vmovn_s16(vld1q_s16(filter[y0_q4]));

  convolve_8tap_2d_horiz_neon_i8mm(src - horiz_offset - vert_offset,
                                   src_stride, im_block, im_stride, w,
                                   im_height, x_filter_8tap);

  convolve_8tap_vert_neon_i8mm(im_block, im_stride, dst, dst_stride, w, h,
                               y_filter_8tap);
}

void vpx_convolve8_avg_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
                                 uint8_t *dst, ptrdiff_t dst_stride,
                                 const InterpKernel *filter, int x0_q4,
                                 int x_step_q4, int y0_q4, int y_step_q4, int w,
                                 int h) {
  DECLARE_ALIGNED(32, uint8_t, im_block[64 * 71]);
  const int im_stride = 64;

  // Averaging convolution always uses an 8-tap filter.
  // Account for the vertical phase needing 3 lines prior and 4 lines post.
  const int im_height = h + SUBPEL_TAPS - 1;
  const ptrdiff_t offset = SUBPEL_TAPS / 2 - 1;

  assert(y_step_q4 == 16);
  assert(x_step_q4 == 16);

  const int8x8_t x_filter_8tap = vmovn_s16(vld1q_s16(filter[x0_q4]));

  convolve_8tap_2d_horiz_neon_i8mm(src - offset - offset * src_stride,
                                   src_stride, im_block, im_stride, w,
                                   im_height, x_filter_8tap);

  vpx_convolve8_avg_vert_neon_i8mm(im_block + offset * im_stride, im_stride,
                                   dst, dst_stride, filter, x0_q4, x_step_q4,
                                   y0_q4, y_step_q4, w, h);
}