highbd_hadamard_neon.c (7829B)
/*
 * Copyright (c) 2023 The WebM project authors. All rights reserved.
 * Copyright (c) 2023, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include "config/aom_dsp_rtcd.h"
#include "aom/aom_integer.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/transpose_neon.h"
#include "aom_dsp/arm/sum_neon.h"
#include "aom_ports/mem.h"

static inline void hadamard_highbd_col8_first_pass(int16x8_t *a0, int16x8_t *a1,
                                                   int16x8_t *a2, int16x8_t *a3,
                                                   int16x8_t *a4, int16x8_t *a5,
                                                   int16x8_t *a6,
                                                   int16x8_t *a7) {
  int16x8_t b0 = vaddq_s16(*a0, *a1);
  int16x8_t b1 = vsubq_s16(*a0, *a1);
  int16x8_t b2 = vaddq_s16(*a2, *a3);
  int16x8_t b3 = vsubq_s16(*a2, *a3);
  int16x8_t b4 = vaddq_s16(*a4, *a5);
  int16x8_t b5 = vsubq_s16(*a4, *a5);
  int16x8_t b6 = vaddq_s16(*a6, *a7);
  int16x8_t b7 = vsubq_s16(*a6, *a7);

  int16x8_t c0 = vaddq_s16(b0, b2);
  int16x8_t c2 = vsubq_s16(b0, b2);
  int16x8_t c1 = vaddq_s16(b1, b3);
  int16x8_t c3 = vsubq_s16(b1, b3);
  int16x8_t c4 = vaddq_s16(b4, b6);
  int16x8_t c6 = vsubq_s16(b4, b6);
  int16x8_t c5 = vaddq_s16(b5, b7);
  int16x8_t c7 = vsubq_s16(b5, b7);

  *a0 = vaddq_s16(c0, c4);
  *a2 = vsubq_s16(c0, c4);
  *a7 = vaddq_s16(c1, c5);
  *a6 = vsubq_s16(c1, c5);
  *a3 = vaddq_s16(c2, c6);
  *a1 = vsubq_s16(c2, c6);
  *a4 = vaddq_s16(c3, c7);
  *a5 = vsubq_s16(c3, c7);
}

static inline void hadamard_highbd_col4_second_pass(int16x4_t a0, int16x4_t a1,
                                                    int16x4_t a2, int16x4_t a3,
                                                    int16x4_t a4, int16x4_t a5,
                                                    int16x4_t a6, int16x4_t a7,
                                                    tran_low_t *coeff) {
  int32x4_t b0 = vaddl_s16(a0, a1);
  int32x4_t b1 = vsubl_s16(a0, a1);
  int32x4_t b2 = vaddl_s16(a2, a3);
  int32x4_t b3 = vsubl_s16(a2, a3);
  int32x4_t b4 = vaddl_s16(a4, a5);
  int32x4_t b5 = vsubl_s16(a4, a5);
  int32x4_t b6 = vaddl_s16(a6, a7);
  int32x4_t b7 = vsubl_s16(a6, a7);

  int32x4_t c0 = vaddq_s32(b0, b2);
  int32x4_t c2 = vsubq_s32(b0, b2);
  int32x4_t c1 = vaddq_s32(b1, b3);
  int32x4_t c3 = vsubq_s32(b1, b3);
  int32x4_t c4 = vaddq_s32(b4, b6);
  int32x4_t c6 = vsubq_s32(b4, b6);
  int32x4_t c5 = vaddq_s32(b5, b7);
  int32x4_t c7 = vsubq_s32(b5, b7);

  int32x4_t d0 = vaddq_s32(c0, c4);
  int32x4_t d2 = vsubq_s32(c0, c4);
  int32x4_t d7 = vaddq_s32(c1, c5);
  int32x4_t d6 = vsubq_s32(c1, c5);
  int32x4_t d3 = vaddq_s32(c2, c6);
  int32x4_t d1 = vsubq_s32(c2, c6);
  int32x4_t d4 = vaddq_s32(c3, c7);
  int32x4_t d5 = vsubq_s32(c3, c7);

  vst1q_s32(coeff + 0, d0);
  vst1q_s32(coeff + 4, d1);
  vst1q_s32(coeff + 8, d2);
  vst1q_s32(coeff + 12, d3);
  vst1q_s32(coeff + 16, d4);
  vst1q_s32(coeff + 20, d5);
  vst1q_s32(coeff + 24, d6);
  vst1q_s32(coeff + 28, d7);
}
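/* For reference, hadamard_highbd_col8_first_pass above is the vector form of
 * a standard three-stage 8-point Hadamard butterfly, applied to all 8 lanes
 * of each register at once. A minimal scalar sketch of the same computation
 * follows (illustrative only; the function name is hypothetical and the
 * block is not compiled):
 */
#if 0
static void hadamard_col8_scalar_sketch(const int16_t a[8], int16_t out[8]) {
  // Stage 1: sums/differences of adjacent pairs.
  int16_t b0 = a[0] + a[1], b1 = a[0] - a[1];
  int16_t b2 = a[2] + a[3], b3 = a[2] - a[3];
  int16_t b4 = a[4] + a[5], b5 = a[4] - a[5];
  int16_t b6 = a[6] + a[7], b7 = a[6] - a[7];
  // Stage 2: combine pairs of pairs.
  int16_t c0 = b0 + b2, c2 = b0 - b2;
  int16_t c1 = b1 + b3, c3 = b1 - b3;
  int16_t c4 = b4 + b6, c6 = b4 - b6;
  int16_t c5 = b5 + b7, c7 = b5 - b7;
  // Stage 3: final combine, written in the same permuted output order the
  // vector helper uses when storing back through a0..a7.
  out[0] = c0 + c4;
  out[2] = c0 - c4;
  out[7] = c1 + c5;
  out[6] = c1 - c5;
  out[3] = c2 + c6;
  out[1] = c2 - c6;
  out[4] = c3 + c7;
  out[5] = c3 - c7;
}
#endif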
void aom_highbd_hadamard_8x8_neon(const int16_t *src_diff, ptrdiff_t src_stride,
                                  tran_low_t *coeff) {
  int16x4_t b0, b1, b2, b3, b4, b5, b6, b7;

  int16x8_t s0 = vld1q_s16(src_diff + 0 * src_stride);
  int16x8_t s1 = vld1q_s16(src_diff + 1 * src_stride);
  int16x8_t s2 = vld1q_s16(src_diff + 2 * src_stride);
  int16x8_t s3 = vld1q_s16(src_diff + 3 * src_stride);
  int16x8_t s4 = vld1q_s16(src_diff + 4 * src_stride);
  int16x8_t s5 = vld1q_s16(src_diff + 5 * src_stride);
  int16x8_t s6 = vld1q_s16(src_diff + 6 * src_stride);
  int16x8_t s7 = vld1q_s16(src_diff + 7 * src_stride);

  // For the first pass we can stay in 16-bit elements (4095*8 = 32760).
  hadamard_highbd_col8_first_pass(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);

  transpose_elems_inplace_s16_8x8(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);

  // For the second pass we need to widen to 32-bit elements, so we're
  // processing 4 columns at a time.
  // Skip the second transpose because it is not required.

  b0 = vget_low_s16(s0);
  b1 = vget_low_s16(s1);
  b2 = vget_low_s16(s2);
  b3 = vget_low_s16(s3);
  b4 = vget_low_s16(s4);
  b5 = vget_low_s16(s5);
  b6 = vget_low_s16(s6);
  b7 = vget_low_s16(s7);

  hadamard_highbd_col4_second_pass(b0, b1, b2, b3, b4, b5, b6, b7, coeff);

  b0 = vget_high_s16(s0);
  b1 = vget_high_s16(s1);
  b2 = vget_high_s16(s2);
  b3 = vget_high_s16(s3);
  b4 = vget_high_s16(s4);
  b5 = vget_high_s16(s5);
  b6 = vget_high_s16(s6);
  b7 = vget_high_s16(s7);

  hadamard_highbd_col4_second_pass(b0, b1, b2, b3, b4, b5, b6, b7, coeff + 32);
}

void aom_highbd_hadamard_16x16_neon(const int16_t *src_diff,
                                    ptrdiff_t src_stride, tran_low_t *coeff) {
  // Rearrange 16x16 to 8x32 and remove stride.
  // Top left first.
  aom_highbd_hadamard_8x8_neon(src_diff, src_stride, coeff);
  // Top right.
  aom_highbd_hadamard_8x8_neon(src_diff + 8, src_stride, coeff + 64);
  // Bottom left.
  aom_highbd_hadamard_8x8_neon(src_diff + 8 * src_stride, src_stride,
                               coeff + 128);
  // Bottom right.
  aom_highbd_hadamard_8x8_neon(src_diff + 8 * src_stride + 8, src_stride,
                               coeff + 192);

  for (int i = 0; i < 16; i++) {
    int32x4_t a0 = vld1q_s32(coeff + 4 * i);
    int32x4_t a1 = vld1q_s32(coeff + 4 * i + 64);
    int32x4_t a2 = vld1q_s32(coeff + 4 * i + 128);
    int32x4_t a3 = vld1q_s32(coeff + 4 * i + 192);

    int32x4_t b0 = vhaddq_s32(a0, a1);
    int32x4_t b1 = vhsubq_s32(a0, a1);
    int32x4_t b2 = vhaddq_s32(a2, a3);
    int32x4_t b3 = vhsubq_s32(a2, a3);

    int32x4_t c0 = vaddq_s32(b0, b2);
    int32x4_t c1 = vaddq_s32(b1, b3);
    int32x4_t c2 = vsubq_s32(b0, b2);
    int32x4_t c3 = vsubq_s32(b1, b3);

    vst1q_s32(coeff + 4 * i, c0);
    vst1q_s32(coeff + 4 * i + 64, c1);
    vst1q_s32(coeff + 4 * i + 128, c2);
    vst1q_s32(coeff + 4 * i + 192, c3);
  }
}
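/* Note on the combine loop above: vhaddq_s32/vhsubq_s32 compute the halved
 * sum/difference (a +/- b) >> 1, evaluating a +/- b exactly before the
 * shift, so the first combine stage cannot overflow int32 and the output
 * stays on the same scale as the scalar C implementation. A scalar sketch
 * of one lane of one loop iteration (illustrative only; the name is
 * hypothetical and the block is not compiled):
 */
#if 0
static void combine_16x16_scalar_sketch(int32_t a0, int32_t a1, int32_t a2,
                                        int32_t a3, int32_t out[4]) {
  // Halving add/sub, per lane. Unlike the NEON intrinsics, this plain C
  // form would need a wider intermediate to be exact for extreme inputs.
  int32_t b0 = (a0 + a1) >> 1;
  int32_t b1 = (a0 - a1) >> 1;
  int32_t b2 = (a2 + a3) >> 1;
  int32_t b3 = (a2 - a3) >> 1;
  out[0] = b0 + b2;  // -> coeff[4 * i]
  out[1] = b1 + b3;  // -> coeff[4 * i + 64]
  out[2] = b0 - b2;  // -> coeff[4 * i + 128]
  out[3] = b1 - b3;  // -> coeff[4 * i + 192]
}
#endif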
void aom_highbd_hadamard_32x32_neon(const int16_t *src_diff,
                                    ptrdiff_t src_stride, tran_low_t *coeff) {
  // Rearrange 32x32 to 16x64 and remove stride.
  // Top left first.
  aom_highbd_hadamard_16x16_neon(src_diff, src_stride, coeff);
  // Top right.
  aom_highbd_hadamard_16x16_neon(src_diff + 16, src_stride, coeff + 256);
  // Bottom left.
  aom_highbd_hadamard_16x16_neon(src_diff + 16 * src_stride, src_stride,
                                 coeff + 512);
  // Bottom right.
  aom_highbd_hadamard_16x16_neon(src_diff + 16 * src_stride + 16, src_stride,
                                 coeff + 768);

  for (int i = 0; i < 64; i++) {
    int32x4_t a0 = vld1q_s32(coeff + 4 * i);
    int32x4_t a1 = vld1q_s32(coeff + 4 * i + 256);
    int32x4_t a2 = vld1q_s32(coeff + 4 * i + 512);
    int32x4_t a3 = vld1q_s32(coeff + 4 * i + 768);

    int32x4_t b0 = vshrq_n_s32(vaddq_s32(a0, a1), 2);
    int32x4_t b1 = vshrq_n_s32(vsubq_s32(a0, a1), 2);
    int32x4_t b2 = vshrq_n_s32(vaddq_s32(a2, a3), 2);
    int32x4_t b3 = vshrq_n_s32(vsubq_s32(a2, a3), 2);

    int32x4_t c0 = vaddq_s32(b0, b2);
    int32x4_t c1 = vaddq_s32(b1, b3);
    int32x4_t c2 = vsubq_s32(b0, b2);
    int32x4_t c3 = vsubq_s32(b1, b3);

    vst1q_s32(coeff + 4 * i, c0);
    vst1q_s32(coeff + 4 * i + 256, c1);
    vst1q_s32(coeff + 4 * i + 512, c2);
    vst1q_s32(coeff + 4 * i + 768, c3);
  }
}
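/* Illustrative usage (hypothetical; not part of the library). Callers
 * normally reach these kernels through the aom_highbd_hadamard_* RTCD
 * dispatch declared in config/aom_dsp_rtcd.h, but they can be invoked
 * directly. tran_low_t is a 32-bit type, so an NxN transform needs N*N
 * tran_low_t outputs.
 */
#if 0
void example_usage(void) {
  // 8x8 residual block stored contiguously, i.e. with a stride of 8.
  DECLARE_ALIGNED(16, int16_t, src_diff[8 * 8]) = { 0 };
  DECLARE_ALIGNED(16, tran_low_t, coeff[8 * 8]);
  aom_highbd_hadamard_8x8_neon(src_diff, /*src_stride=*/8, coeff);
}
#endif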