dec_msa.c
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// MSA version of dsp functions
//
// Author(s): Prashant Patil (prashant.patil@imgtec.com)

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_MSA)

#include "src/dsp/msa_macro.h"

//------------------------------------------------------------------------------
// Transforms

#define IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) {  \
  v4i32 a1_m, b1_m, c1_m, d1_m;                                  \
  v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m;                  \
  const v4i32 cospi8sqrt2minus1 = __msa_fill_w(20091);           \
  const v4i32 sinpi8sqrt2 = __msa_fill_w(35468);                 \
                                                                 \
  a1_m = in0 + in2;                                              \
  b1_m = in0 - in2;                                              \
  c_tmp1_m = (in1 * sinpi8sqrt2) >> 16;                          \
  c_tmp2_m = in3 + ((in3 * cospi8sqrt2minus1) >> 16);            \
  c1_m = c_tmp1_m - c_tmp2_m;                                    \
  d_tmp1_m = in1 + ((in1 * cospi8sqrt2minus1) >> 16);            \
  d_tmp2_m = (in3 * sinpi8sqrt2) >> 16;                          \
  d1_m = d_tmp1_m + d_tmp2_m;                                    \
  BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3);   \
}

static void TransformOne(const int16_t* WEBP_RESTRICT in,
                         uint8_t* WEBP_RESTRICT dst) {
  v8i16 input0, input1;
  v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
  v4i32 res0, res1, res2, res3;
  const v16i8 zero = { 0 };
  v16i8 dest0, dest1, dest2, dest3;

  LD_SH2(in, 8, input0, input1);
  UNPCK_SH_SW(input0, in0, in1);
  UNPCK_SH_SW(input1, in2, in3);
  IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
  TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
  IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3);
  SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);
  TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
  LD_SB4(dst, BPS, dest0, dest1, dest2, dest3);
  ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3,
             res0, res1, res2, res3);
  ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3,
             res0, res1, res2, res3);
  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
  CLIP_SW4_0_255(res0, res1, res2, res3);
  PCKEV_B2_SW(res0, res1, res2, res3, vt0, vt1);
  res0 = (v4i32)__msa_pckev_b((v16i8)vt0, (v16i8)vt1);
  ST4x4_UB(res0, res0, 3, 2, 1, 0, dst, BPS);
}

static void TransformTwo(const int16_t* WEBP_RESTRICT in,
                         uint8_t* WEBP_RESTRICT dst, int do_two) {
  TransformOne(in, dst);
  if (do_two) {
    TransformOne(in + 16, dst + 4);
  }
}
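// TransformWHT below applies the inverse 4x4 Walsh-Hadamard transform to the
// DC coefficients and scatters each result to out[i * 16], i.e. into the DC
// slot of one of the 16 per-block coefficient sets. A rough scalar sketch of
// the first (column) butterfly pass, matching the generic C version (the
// vector code interleaves both passes through the shuffle masks):
//
//   // one column of the first pass (i = 0..3):
//   a0 = in[0 + i] + in[12 + i];   a1 = in[4 + i] + in[8 + i];
//   a2 = in[4 + i] - in[8 + i];    a3 = in[0 + i] - in[12 + i];
//   tmp[0 + i] = a0 + a1;  tmp[8 + i]  = a0 - a1;
//   tmp[4 + i] = a3 + a2;  tmp[12 + i] = a3 - a2;
//
// The final stage adds 3 and arithmetic-shifts right by 3 for rounding.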
static void TransformWHT(const int16_t* WEBP_RESTRICT in,
                         int16_t* WEBP_RESTRICT out) {
  v8i16 input0, input1;
  const v8i16 mask0 = { 0, 1, 2, 3, 8, 9, 10, 11 };
  const v8i16 mask1 = { 4, 5, 6, 7, 12, 13, 14, 15 };
  const v8i16 mask2 = { 0, 4, 8, 12, 1, 5, 9, 13 };
  const v8i16 mask3 = { 3, 7, 11, 15, 2, 6, 10, 14 };
  v8i16 tmp0, tmp1, tmp2, tmp3;
  v8i16 out0, out1;

  LD_SH2(in, 8, input0, input1);
  input1 = SLDI_SH(input1, input1, 8);
  tmp0 = input0 + input1;
  tmp1 = input0 - input1;
  VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3);
  out0 = tmp2 + tmp3;
  out1 = tmp2 - tmp3;
  VSHF_H2_SH(out0, out1, out0, out1, mask2, mask3, input0, input1);
  tmp0 = input0 + input1;
  tmp1 = input0 - input1;
  VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3);
  tmp0 = tmp2 + tmp3;
  tmp1 = tmp2 - tmp3;
  ADDVI_H2_SH(tmp0, 3, tmp1, 3, out0, out1);
  SRAI_H2_SH(out0, out1, 3);
  out[0] = __msa_copy_s_h(out0, 0);
  out[16] = __msa_copy_s_h(out0, 4);
  out[32] = __msa_copy_s_h(out1, 0);
  out[48] = __msa_copy_s_h(out1, 4);
  out[64] = __msa_copy_s_h(out0, 1);
  out[80] = __msa_copy_s_h(out0, 5);
  out[96] = __msa_copy_s_h(out1, 1);
  out[112] = __msa_copy_s_h(out1, 5);
  out[128] = __msa_copy_s_h(out0, 2);
  out[144] = __msa_copy_s_h(out0, 6);
  out[160] = __msa_copy_s_h(out1, 2);
  out[176] = __msa_copy_s_h(out1, 6);
  out[192] = __msa_copy_s_h(out0, 3);
  out[208] = __msa_copy_s_h(out0, 7);
  out[224] = __msa_copy_s_h(out1, 3);
  out[240] = __msa_copy_s_h(out1, 7);
}

static void TransformDC(const int16_t* WEBP_RESTRICT in,
                        uint8_t* WEBP_RESTRICT dst) {
  const int DC = (in[0] + 4) >> 3;
  const v8i16 tmp0 = __msa_fill_h(DC);
  ADDBLK_ST4x4_UB(tmp0, tmp0, tmp0, tmp0, dst, BPS);
}
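// TransformAC3 below handles the common case where only in[0], in[1] and
// in[4] are non-zero. WEBP_TRANSFORM_AC3_MUL1/MUL2 use the same Q16
// fixed-point constants as IDCT_1D_W above; presumably (matching the shared
// definitions in the dsp headers) they reduce to:
//
//   MUL1(x) == x + ((x * 20091) >> 16)   // ~ x * sqrt(2) * cos(pi / 8)
//   MUL2(x) == (x * 35468) >> 16         // ~ x * sqrt(2) * sin(pi / 8)
//
// so each pixel adds clip255-ed ((a +/- c4|d4) + (+/- in2|in3)) >> 3 to dst.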
static void TransformAC3(const int16_t* WEBP_RESTRICT in,
                         uint8_t* WEBP_RESTRICT dst) {
  const int a = in[0] + 4;
  const int c4 = WEBP_TRANSFORM_AC3_MUL2(in[4]);
  const int d4 = WEBP_TRANSFORM_AC3_MUL1(in[4]);
  const int in2 = WEBP_TRANSFORM_AC3_MUL2(in[1]);
  const int in3 = WEBP_TRANSFORM_AC3_MUL1(in[1]);
  v4i32 tmp0 = { 0 };
  v4i32 out0 = __msa_fill_w(a + d4);
  v4i32 out1 = __msa_fill_w(a + c4);
  v4i32 out2 = __msa_fill_w(a - c4);
  v4i32 out3 = __msa_fill_w(a - d4);
  v4i32 res0, res1, res2, res3;
  const v4i32 zero = { 0 };
  v16u8 dest0, dest1, dest2, dest3;

  INSERT_W4_SW(in3, in2, -in2, -in3, tmp0);
  ADD4(out0, tmp0, out1, tmp0, out2, tmp0, out3, tmp0,
       out0, out1, out2, out3);
  SRAI_W4_SW(out0, out1, out2, out3, 3);
  LD_UB4(dst, BPS, dest0, dest1, dest2, dest3);
  ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3,
             res0, res1, res2, res3);
  ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3,
             res0, res1, res2, res3);
  ADD4(res0, out0, res1, out1, res2, out2, res3, out3, res0, res1, res2, res3);
  CLIP_SW4_0_255(res0, res1, res2, res3);
  PCKEV_B2_SW(res0, res1, res2, res3, out0, out1);
  res0 = (v4i32)__msa_pckev_b((v16i8)out0, (v16i8)out1);
  ST4x4_UB(res0, res0, 3, 2, 1, 0, dst, BPS);
}

//------------------------------------------------------------------------------
// Edge filtering functions

#define FLIP_SIGN2(in0, in1, out0, out1) {  \
  out0 = (v16i8)__msa_xori_b(in0, 0x80);    \
  out1 = (v16i8)__msa_xori_b(in1, 0x80);    \
}

#define FLIP_SIGN4(in0, in1, in2, in3, out0, out1, out2, out3) {  \
  FLIP_SIGN2(in0, in1, out0, out1);                               \
  FLIP_SIGN2(in2, in3, out2, out3);                               \
}

#define FILT_VAL(q0_m, p0_m, mask, filt) do {  \
  v16i8 q0_sub_p0;                             \
  q0_sub_p0 = __msa_subs_s_b(q0_m, p0_m);      \
  filt = __msa_adds_s_b(filt, q0_sub_p0);      \
  filt = __msa_adds_s_b(filt, q0_sub_p0);      \
  filt = __msa_adds_s_b(filt, q0_sub_p0);      \
  filt = filt & mask;                          \
} while (0)

#define FILT2(q_m, p_m, q, p) do {            \
  u_r = SRAI_H(temp1, 7);                     \
  u_r = __msa_sat_s_h(u_r, 7);                \
  u_l = SRAI_H(temp3, 7);                     \
  u_l = __msa_sat_s_h(u_l, 7);                \
  u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r);  \
  q_m = __msa_subs_s_b(q_m, u);               \
  p_m = __msa_adds_s_b(p_m, u);               \
  q = __msa_xori_b((v16u8)q_m, 0x80);         \
  p = __msa_xori_b((v16u8)p_m, 0x80);         \
} while (0)

#define LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev) do {  \
  v16i8 p1_m, p0_m, q0_m, q1_m;                         \
  v16i8 filt, t1, t2;                                   \
  const v16i8 cnst4b = __msa_ldi_b(4);                  \
  const v16i8 cnst3b = __msa_ldi_b(3);                  \
                                                        \
  FLIP_SIGN4(p1, p0, q0, q1, p1_m, p0_m, q0_m, q1_m);   \
  filt = __msa_subs_s_b(p1_m, q1_m);                    \
  filt = filt & hev;                                    \
  FILT_VAL(q0_m, p0_m, mask, filt);                     \
  t1 = __msa_adds_s_b(filt, cnst4b);                    \
  t1 = SRAI_B(t1, 3);                                   \
  t2 = __msa_adds_s_b(filt, cnst3b);                    \
  t2 = SRAI_B(t2, 3);                                   \
  q0_m = __msa_subs_s_b(q0_m, t1);                      \
  q0 = __msa_xori_b((v16u8)q0_m, 0x80);                 \
  p0_m = __msa_adds_s_b(p0_m, t2);                      \
  p0 = __msa_xori_b((v16u8)p0_m, 0x80);                 \
  filt = __msa_srari_b(t1, 1);                          \
  hev = __msa_xori_b(hev, 0xff);                        \
  filt = filt & hev;                                    \
  q1_m = __msa_subs_s_b(q1_m, filt);                    \
  q1 = __msa_xori_b((v16u8)q1_m, 0x80);                 \
  p1_m = __msa_adds_s_b(p1_m, filt);                    \
  p1 = __msa_xori_b((v16u8)p1_m, 0x80);                 \
} while (0)

#define LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) do {  \
  v16i8 p2_m, p1_m, p0_m, q2_m, q1_m, q0_m;                   \
  v16i8 u, filt, t1, t2, filt_sign;                           \
  v8i16 filt_r, filt_l, u_r, u_l;                             \
  v8i16 temp0, temp1, temp2, temp3;                           \
  const v16i8 cnst4b = __msa_ldi_b(4);                        \
  const v16i8 cnst3b = __msa_ldi_b(3);                        \
  const v8i16 cnst9h = __msa_ldi_h(9);                        \
  const v8i16 cnst63h = __msa_ldi_h(63);                      \
                                                              \
  FLIP_SIGN4(p1, p0, q0, q1, p1_m, p0_m, q0_m, q1_m);         \
  filt = __msa_subs_s_b(p1_m, q1_m);                          \
  FILT_VAL(q0_m, p0_m, mask, filt);                           \
  FLIP_SIGN2(p2, q2, p2_m, q2_m);                             \
  t2 = filt & hev;                                            \
  /* filt_val &= ~hev */                                      \
  hev = __msa_xori_b(hev, 0xff);                              \
  filt = filt & hev;                                          \
  t1 = __msa_adds_s_b(t2, cnst4b);                            \
  t1 = SRAI_B(t1, 3);                                         \
  t2 = __msa_adds_s_b(t2, cnst3b);                            \
  t2 = SRAI_B(t2, 3);                                         \
  q0_m = __msa_subs_s_b(q0_m, t1);                            \
  p0_m = __msa_adds_s_b(p0_m, t2);                            \
  filt_sign = __msa_clti_s_b(filt, 0);                        \
  ILVRL_B2_SH(filt_sign, filt, filt_r, filt_l);               \
  /* update q2/p2 */                                          \
  temp0 = filt_r * cnst9h;                                    \
  temp1 = temp0 + cnst63h;                                    \
  temp2 = filt_l * cnst9h;                                    \
  temp3 = temp2 + cnst63h;                                    \
  FILT2(q2_m, p2_m, q2, p2);                                  \
  /* update q1/p1 */                                          \
  temp1 = temp1 + temp0;                                      \
  temp3 = temp3 + temp2;                                      \
  FILT2(q1_m, p1_m, q1, p1);                                  \
  /* update q0/p0 */                                          \
  temp1 = temp1 + temp0;                                      \
  temp3 = temp3 + temp2;                                      \
  FILT2(q0_m, p0_m, q0, p0);                                  \
} while (0)
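// LPF_MASK_HEV derives the two per-pixel predicates used by the filters
// above. In scalar terms (a sketch of the VP8 loop-filter conditions as
// implemented by the vector ops below):
//
//   hev  = max(|p1 - p0|, |q1 - q0|) > thresh;          // high edge variance
//   mask = (2 * |p0 - q0| + |p1 - q1| / 2 <= b_limit)   // edge strength
//          && |p3 - p2| <= limit && |p2 - p1| <= limit  // neighborhood is
//          && |p1 - p0| <= limit && |q1 - q0| <= limit  // smooth enough to
//          && |q2 - q1| <= limit && |q3 - q2| <= limit; // filter
//
// Both are materialized as all-ones/all-zeros byte masks so they can simply
// be ANDed with the filter values.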
#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in,                 \
                     q0_in, q1_in, q2_in, q3_in,                 \
                     limit_in, b_limit_in, thresh_in,            \
                     hev_out, mask_out) do {                     \
  v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m;  \
  v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m;  \
  v16u8 flat_out;                                                \
                                                                 \
  /* absolute subtraction of pixel values */                     \
  p3_asub_p2_m = __msa_asub_u_b(p3_in, p2_in);                   \
  p2_asub_p1_m = __msa_asub_u_b(p2_in, p1_in);                   \
  p1_asub_p0_m = __msa_asub_u_b(p1_in, p0_in);                   \
  q1_asub_q0_m = __msa_asub_u_b(q1_in, q0_in);                   \
  q2_asub_q1_m = __msa_asub_u_b(q2_in, q1_in);                   \
  q3_asub_q2_m = __msa_asub_u_b(q3_in, q2_in);                   \
  p0_asub_q0_m = __msa_asub_u_b(p0_in, q0_in);                   \
  p1_asub_q1_m = __msa_asub_u_b(p1_in, q1_in);                   \
  /* calculation of hev */                                       \
  flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m);          \
  hev_out = (thresh_in < flat_out);                              \
  /* calculation of mask */                                      \
  p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m);     \
  p1_asub_q1_m = SRAI_B(p1_asub_q1_m, 1);                        \
  p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m);     \
  mask_out = (b_limit_in < p0_asub_q0_m);                        \
  mask_out = __msa_max_u_b(flat_out, mask_out);                  \
  p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m);      \
  mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out);              \
  q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m);      \
  mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out);              \
  mask_out = (limit_in < mask_out);                              \
  mask_out = __msa_xori_b(mask_out, 0xff);                       \
} while (0)

#define ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) do {  \
  const uint16_t tmp0_h = __msa_copy_s_h((v8i16)in1, in1_idx);   \
  const uint32_t tmp0_w = __msa_copy_s_w((v4i32)in0, in0_idx);   \
  SW(tmp0_w, pdst);                                              \
  SH(tmp0_h, pdst + stride);                                     \
} while (0)

#define ST6x4_UB(in0, start_in0_idx, in1, start_in1_idx, pdst, stride) do {  \
  uint8_t* ptmp1 = (uint8_t*)pdst;                                           \
  ST6x1_UB(in0, start_in0_idx, in1, start_in1_idx, ptmp1, 4);                \
  ptmp1 += stride;                                                           \
  ST6x1_UB(in0, start_in0_idx + 1, in1, start_in1_idx + 1, ptmp1, 4);        \
  ptmp1 += stride;                                                           \
  ST6x1_UB(in0, start_in0_idx + 2, in1, start_in1_idx + 2, ptmp1, 4);        \
  ptmp1 += stride;                                                           \
  ST6x1_UB(in0, start_in0_idx + 3, in1, start_in1_idx + 3, ptmp1, 4);        \
} while (0)

#define LPF_SIMPLE_FILT(p1_in, p0_in, q0_in, q1_in, mask) do {    \
  v16i8 p1_m, p0_m, q0_m, q1_m, filt, filt1, filt2;               \
  const v16i8 cnst4b = __msa_ldi_b(4);                            \
  const v16i8 cnst3b = __msa_ldi_b(3);                            \
                                                                  \
  FLIP_SIGN4(p1_in, p0_in, q0_in, q1_in, p1_m, p0_m, q0_m, q1_m); \
  filt = __msa_subs_s_b(p1_m, q1_m);                              \
  FILT_VAL(q0_m, p0_m, mask, filt);                               \
  filt1 = __msa_adds_s_b(filt, cnst4b);                           \
  filt1 = SRAI_B(filt1, 3);                                       \
  filt2 = __msa_adds_s_b(filt, cnst3b);                           \
  filt2 = SRAI_B(filt2, 3);                                       \
  q0_m = __msa_subs_s_b(q0_m, filt1);                             \
  p0_m = __msa_adds_s_b(p0_m, filt2);                             \
  q0_in = __msa_xori_b((v16u8)q0_m, 0x80);                        \
  p0_in = __msa_xori_b((v16u8)p0_m, 0x80);                        \
} while (0)

#define LPF_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask) do {  \
  v16u8 p1_a_sub_q1, p0_a_sub_q0;                            \
                                                             \
  p0_a_sub_q0 = __msa_asub_u_b(p0, q0);                      \
  p1_a_sub_q1 = __msa_asub_u_b(p1, q1);                      \
  p1_a_sub_q1 = (v16u8)__msa_srli_b((v16i8)p1_a_sub_q1, 1);  \
  p0_a_sub_q0 = __msa_adds_u_b(p0_a_sub_q0, p0_a_sub_q0);    \
  mask = __msa_adds_u_b(p0_a_sub_q0, p1_a_sub_q1);           \
  mask = (mask <= b_limit);                                  \
} while (0)
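// VFilter16/HFilter16 below run the six-tap macroblock-edge filter across a
// full 16-pixel luma edge. The core adjustment in LPF_MBFILTER matches the
// VP8 "strong" filter: with w = clamp(p1 - q1) + 3 * (q0 - p0) (masked, and
// restricted to non-hev pixels; hev pixels get the four-tap adjustment
// instead), the three updates are, as a scalar sketch:
//
//   a = (27 * w + 63) >> 7;  q0 -= a;  p0 += a;
//   a = (18 * w + 63) >> 7;  q1 -= a;  p1 += a;
//   a = ( 9 * w + 63) >> 7;  q2 -= a;  p2 += a;
//
// which the macro builds incrementally as temp += 9 * w between FILT2 calls.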
static void VFilter16(uint8_t* src, int stride,
                      int b_limit_in, int limit_in, int thresh_in) {
  uint8_t* ptemp = src - 4 * stride;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 mask, hev;
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);

  LD_UB8(ptemp, stride, p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
  ptemp = src - 3 * stride;
  ST_UB4(p2, p1, p0, q0, ptemp, stride);
  ptemp += (4 * stride);
  ST_UB2(q1, q2, ptemp, stride);
}

static void HFilter16(uint8_t* src, int stride,
                      int b_limit_in, int limit_in, int thresh_in) {
  uint8_t* ptmp = src - 4;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 mask, hev;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
  v16u8 row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);

  LD_UB8(ptmp, stride, row0, row1, row2, row3, row4, row5, row6, row7);
  ptmp += (8 * stride);
  LD_UB8(ptmp, stride, row8, row9, row10, row11, row12, row13, row14, row15);
  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                      row8, row9, row10, row11, row12, row13, row14, row15,
                      p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
  ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
  ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7);
  ILVRL_B2_SH(q2, q1, tmp2, tmp5);
  ptmp = src - 3;
  ST6x1_UB(tmp3, 0, tmp2, 0, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp3, 1, tmp2, 1, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp3, 2, tmp2, 2, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp3, 3, tmp2, 3, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp4, 0, tmp2, 4, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp4, 1, tmp2, 5, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp4, 2, tmp2, 6, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp4, 3, tmp2, 7, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp6, 0, tmp5, 0, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp6, 1, tmp5, 1, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp6, 2, tmp5, 2, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp6, 3, tmp5, 3, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp7, 0, tmp5, 4, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp7, 1, tmp5, 5, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp7, 2, tmp5, 6, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp7, 3, tmp5, 7, ptmp, 4);
}
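// The *16i variants below filter the three edges that lie inside a 16x16
// luma macroblock (at offsets 4, 8 and 12). They use the lighter four-tap
// filter (LPF_FILTER4_4W), which only touches p1..q1, so only four
// rows/columns are written back per edge.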
// on three inner edges
static void VFilterHorEdge16i(uint8_t* src, int stride,
                              int b_limit, int limit, int thresh) {
  v16u8 mask, hev;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  const v16u8 thresh0 = (v16u8)__msa_fill_b(thresh);
  const v16u8 b_limit0 = (v16u8)__msa_fill_b(b_limit);
  const v16u8 limit0 = (v16u8)__msa_fill_b(limit);

  LD_UB8((src - 4 * stride), stride, p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
               hev, mask);
  LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
  ST_UB4(p1, p0, q0, q1, (src - 2 * stride), stride);
}

static void VFilter16i(uint8_t* src_y, int stride,
                       int b_limit, int limit, int thresh) {
  VFilterHorEdge16i(src_y + 4 * stride, stride, b_limit, limit, thresh);
  VFilterHorEdge16i(src_y + 8 * stride, stride, b_limit, limit, thresh);
  VFilterHorEdge16i(src_y + 12 * stride, stride, b_limit, limit, thresh);
}

static void HFilterVertEdge16i(uint8_t* src, int stride,
                               int b_limit, int limit, int thresh) {
  v16u8 mask, hev;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
  v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  const v16u8 thresh0 = (v16u8)__msa_fill_b(thresh);
  const v16u8 b_limit0 = (v16u8)__msa_fill_b(b_limit);
  const v16u8 limit0 = (v16u8)__msa_fill_b(limit);

  LD_UB8(src - 4, stride, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(src - 4 + (8 * stride), stride,
         row8, row9, row10, row11, row12, row13, row14, row15);
  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                      row8, row9, row10, row11, row12, row13, row14, row15,
                      p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
               hev, mask);
  LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
  ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
  ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5);
  src -= 2;
  ST4x8_UB(tmp2, tmp3, src, stride);
  src += (8 * stride);
  ST4x8_UB(tmp4, tmp5, src, stride);
}

static void HFilter16i(uint8_t* src_y, int stride,
                       int b_limit, int limit, int thresh) {
  HFilterVertEdge16i(src_y + 4, stride, b_limit, limit, thresh);
  HFilterVertEdge16i(src_y + 8, stride, b_limit, limit, thresh);
  HFilterVertEdge16i(src_y + 12, stride, b_limit, limit, thresh);
}
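// For the 8-pixel-wide chroma filters, the U and V edges are processed in a
// single pass: ILVR_D4_UB packs the 8 U bytes into the low half and the 8 V
// bytes into the high half of each 16-byte register, conceptually:
//
//   p0 = { u0, u1, ..., u7, v0, v1, ..., v7 }
//
// After filtering, __msa_copy_s_d(reg, 0) extracts the U half and
// __msa_copy_s_d(reg, 1) the V half for the stores.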
// 8-pixels wide variants, for chroma filtering
static void VFilter8(uint8_t* WEBP_RESTRICT src_u, uint8_t* WEBP_RESTRICT src_v,
                     int stride, int b_limit_in, int limit_in, int thresh_in) {
  uint8_t* ptmp_src_u = src_u - 4 * stride;
  uint8_t* ptmp_src_v = src_v - 4 * stride;
  uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
  v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
  v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);

  LD_UB8(ptmp_src_u, stride, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u);
  LD_UB8(ptmp_src_v, stride, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v);
  ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0);
  ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
  p2_d = __msa_copy_s_d((v2i64)p2, 0);
  p1_d = __msa_copy_s_d((v2i64)p1, 0);
  p0_d = __msa_copy_s_d((v2i64)p0, 0);
  q0_d = __msa_copy_s_d((v2i64)q0, 0);
  q1_d = __msa_copy_s_d((v2i64)q1, 0);
  q2_d = __msa_copy_s_d((v2i64)q2, 0);
  ptmp_src_u += stride;
  SD4(p2_d, p1_d, p0_d, q0_d, ptmp_src_u, stride);
  ptmp_src_u += (4 * stride);
  SD(q1_d, ptmp_src_u);
  ptmp_src_u += stride;
  SD(q2_d, ptmp_src_u);
  p2_d = __msa_copy_s_d((v2i64)p2, 1);
  p1_d = __msa_copy_s_d((v2i64)p1, 1);
  p0_d = __msa_copy_s_d((v2i64)p0, 1);
  q0_d = __msa_copy_s_d((v2i64)q0, 1);
  q1_d = __msa_copy_s_d((v2i64)q1, 1);
  q2_d = __msa_copy_s_d((v2i64)q2, 1);
  ptmp_src_v += stride;
  SD4(p2_d, p1_d, p0_d, q0_d, ptmp_src_v, stride);
  ptmp_src_v += (4 * stride);
  SD(q1_d, ptmp_src_v);
  ptmp_src_v += stride;
  SD(q2_d, ptmp_src_v);
}

static void HFilter8(uint8_t* WEBP_RESTRICT src_u, uint8_t* WEBP_RESTRICT src_v,
                     int stride, int b_limit_in, int limit_in, int thresh_in) {
  uint8_t* ptmp_src_u = src_u - 4;
  uint8_t* ptmp_src_v = src_v - 4;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
  v16u8 row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);

  LD_UB8(ptmp_src_u, stride, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(ptmp_src_v, stride,
         row8, row9, row10, row11, row12, row13, row14, row15);
  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                      row8, row9, row10, row11, row12, row13, row14, row15,
                      p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
  ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
  ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7);
  ILVRL_B2_SH(q2, q1, tmp2, tmp5);
  ptmp_src_u += 1;
  ST6x4_UB(tmp3, 0, tmp2, 0, ptmp_src_u, stride);
  ptmp_src_u += 4 * stride;
  ST6x4_UB(tmp4, 0, tmp2, 4, ptmp_src_u, stride);
  ptmp_src_v += 1;
  ST6x4_UB(tmp6, 0, tmp5, 0, ptmp_src_v, stride);
  ptmp_src_v += 4 * stride;
  ST6x4_UB(tmp7, 0, tmp5, 4, ptmp_src_v, stride);
}

static void VFilter8i(uint8_t* WEBP_RESTRICT src_u,
                      uint8_t* WEBP_RESTRICT src_v, int stride,
                      int b_limit_in, int limit_in, int thresh_in) {
  uint64_t p1_d, p0_d, q0_d, q1_d;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
  v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
  v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);

  LD_UB8(src_u, stride, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u);
  src_u += (5 * stride);
  LD_UB8(src_v, stride, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v);
  src_v += (5 * stride);
  ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0);
  ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
  p1_d = __msa_copy_s_d((v2i64)p1, 0);
  p0_d = __msa_copy_s_d((v2i64)p0, 0);
  q0_d = __msa_copy_s_d((v2i64)q0, 0);
  q1_d = __msa_copy_s_d((v2i64)q1, 0);
  SD4(q1_d, q0_d, p0_d, p1_d, src_u, -stride);
  p1_d = __msa_copy_s_d((v2i64)p1, 1);
  p0_d = __msa_copy_s_d((v2i64)p0, 1);
  q0_d = __msa_copy_s_d((v2i64)q0, 1);
  q1_d = __msa_copy_s_d((v2i64)q1, 1);
  SD4(q1_d, q0_d, p0_d, p1_d, src_v, -stride);
}

static void HFilter8i(uint8_t* WEBP_RESTRICT src_u,
                      uint8_t* WEBP_RESTRICT src_v, int stride,
                      int b_limit_in, int limit_in, int thresh_in) {
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
  v16u8 row9, row10, row11, row12, row13, row14, row15;
  v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);

  LD_UB8(src_u, stride, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(src_v, stride,
         row8, row9, row10, row11, row12, row13, row14, row15);
  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                      row8, row9, row10, row11, row12, row13, row14, row15,
                      p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
  ILVR_B2_SW(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SW(tmp1, tmp0, tmp2, tmp3);
  ILVL_B2_SW(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SW(tmp1, tmp0, tmp4, tmp5);
  src_u += 2;
  ST4x4_UB(tmp2, tmp2, 0, 1, 2, 3, src_u, stride);
  src_u += 4 * stride;
  ST4x4_UB(tmp3, tmp3, 0, 1, 2, 3, src_u, stride);
  src_v += 2;
  ST4x4_UB(tmp4, tmp4, 0, 1, 2, 3, src_v, stride);
  src_v += 4 * stride;
  ST4x4_UB(tmp5, tmp5, 0, 1, 2, 3, src_v, stride);
}
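// The "simple" filter variants below adjust only p0/q0 and use just the
// b_limit threshold. A scalar sketch of LPF_SIMPLE_FILT (the arithmetic runs
// on 0x80-flipped signed bytes, so the saturating vector ops double as the
// spec's clamps; sclip below stands for that [-128, 127] clamp):
//
//   int a  = 3 * (q0 - p0) + sclip(p1 - q1);
//   int a1 = sclip(a + 4) >> 3;
//   int a2 = sclip(a + 3) >> 3;
//   q0 = clip255(q0 - a1);
//   p0 = clip255(p0 + a2);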
static void SimpleVFilter16(uint8_t* src, int stride, int b_limit_in) {
  v16u8 p1, p0, q1, q0, mask;
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);

  LD_UB4(src - 2 * stride, stride, p1, p0, q0, q1);
  LPF_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask);
  LPF_SIMPLE_FILT(p1, p0, q0, q1, mask);
  ST_UB2(p0, q0, src - stride, stride);
}

static void SimpleHFilter16(uint8_t* src, int stride, int b_limit_in) {
  v16u8 p1, p0, q1, q0, mask, row0, row1, row2, row3, row4, row5, row6, row7;
  v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1;
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);
  uint8_t* ptemp_src = src - 2;

  LD_UB8(ptemp_src, stride, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(ptemp_src + 8 * stride, stride,
         row8, row9, row10, row11, row12, row13, row14, row15);
  TRANSPOSE16x4_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                      row8, row9, row10, row11, row12, row13, row14, row15,
                      p1, p0, q0, q1);
  LPF_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask);
  LPF_SIMPLE_FILT(p1, p0, q0, q1, mask);
  ILVRL_B2_SH(q0, p0, tmp1, tmp0);
  ptemp_src += 1;
  ST2x4_UB(tmp1, 0, ptemp_src, stride);
  ptemp_src += 4 * stride;
  ST2x4_UB(tmp1, 4, ptemp_src, stride);
  ptemp_src += 4 * stride;
  ST2x4_UB(tmp0, 0, ptemp_src, stride);
  ptemp_src += 4 * stride;
  ST2x4_UB(tmp0, 4, ptemp_src, stride);
  ptemp_src += 4 * stride;
}

static void SimpleVFilter16i(uint8_t* src_y, int stride, int b_limit_in) {
  SimpleVFilter16(src_y + 4 * stride, stride, b_limit_in);
  SimpleVFilter16(src_y + 8 * stride, stride, b_limit_in);
  SimpleVFilter16(src_y + 12 * stride, stride, b_limit_in);
}

static void SimpleHFilter16i(uint8_t* src_y, int stride, int b_limit_in) {
  SimpleHFilter16(src_y + 4, stride, b_limit_in);
  SimpleHFilter16(src_y + 8, stride, b_limit_in);
  SimpleHFilter16(src_y + 12, stride, b_limit_in);
}

//------------------------------------------------------------------------------
// Intra predictions
//------------------------------------------------------------------------------

// 4x4

static void DC4(uint8_t* dst) {   // DC
  uint32_t dc = 4;
  int i;
  for (i = 0; i < 4; ++i) dc += dst[i - BPS] + dst[-1 + i * BPS];
  dc >>= 3;
  dc = dc | (dc << 8) | (dc << 16) | (dc << 24);
  SW4(dc, dc, dc, dc, dst, BPS);
}

static void TM4(uint8_t* dst) {
  const uint8_t* const ptemp = dst - BPS - 1;
  v8i16 T, d, r0, r1, r2, r3;
  const v16i8 zero = { 0 };
  const v8i16 TL = (v8i16)__msa_fill_h(ptemp[0 * BPS]);
  const v8i16 L0 = (v8i16)__msa_fill_h(ptemp[1 * BPS]);
  const v8i16 L1 = (v8i16)__msa_fill_h(ptemp[2 * BPS]);
  const v8i16 L2 = (v8i16)__msa_fill_h(ptemp[3 * BPS]);
  const v8i16 L3 = (v8i16)__msa_fill_h(ptemp[4 * BPS]);
  const v16u8 T1 = LD_UB(ptemp + 1);

  T = (v8i16)__msa_ilvr_b(zero, (v16i8)T1);
  d = T - TL;
  ADD4(d, L0, d, L1, d, L2, d, L3, r0, r1, r2, r3);
  CLIP_SH4_0_255(r0, r1, r2, r3);
  PCKEV_ST4x4_UB(r0, r1, r2, r3, dst, BPS);
}
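// VE4/RD4/LD4 below need the VP8 three-tap smoothing
// AVG3(a, b, c) = (a + 2 * b + c + 2) >> 2, built from two byte averages
// (B2 = __msa_ave_u_b(B, B) is just B):
//
//   AC = __msa_ave_u_b(A, C);    // (A + C) >> 1, truncating
//   R  = __msa_aver_u_b(AC, B);  // (AC + B + 1) >> 1, rounding
//
// Writing r = (A + C) & 1, R is (A + 2 * B + C + 2 - r) >> 2; when r is 1
// the sum A + 2 * B + C is odd, so the dropped bit never crosses a rounding
// boundary: R equals AVG3 exactly, with no 8-bit overflow.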
static void VE4(uint8_t* dst) {   // vertical
  const uint8_t* const ptop = dst - BPS - 1;
  const uint32_t val0 = LW(ptop + 0);
  const uint32_t val1 = LW(ptop + 4);
  uint32_t out;
  v16u8 A = { 0 }, B, C, AC, B2, R;

  INSERT_W2_UB(val0, val1, A);
  B = SLDI_UB(A, A, 1);
  C = SLDI_UB(A, A, 2);
  AC = __msa_ave_u_b(A, C);
  B2 = __msa_ave_u_b(B, B);
  R = __msa_aver_u_b(AC, B2);
  out = __msa_copy_s_w((v4i32)R, 0);
  SW4(out, out, out, out, dst, BPS);
}

static void RD4(uint8_t* dst) {   // Down-right
  const uint8_t* const ptop = dst - 1 - BPS;
  uint32_t val0 = LW(ptop + 0);
  uint32_t val1 = LW(ptop + 4);
  uint32_t val2, val3;
  v16u8 A, B, C, AC, B2, R, A1 = { 0 };

  INSERT_W2_UB(val0, val1, A1);
  A = SLDI_UB(A1, A1, 12);
  A = (v16u8)__msa_insert_b((v16i8)A, 3, ptop[1 * BPS]);
  A = (v16u8)__msa_insert_b((v16i8)A, 2, ptop[2 * BPS]);
  A = (v16u8)__msa_insert_b((v16i8)A, 1, ptop[3 * BPS]);
  A = (v16u8)__msa_insert_b((v16i8)A, 0, ptop[4 * BPS]);
  B = SLDI_UB(A, A, 1);
  C = SLDI_UB(A, A, 2);
  AC = __msa_ave_u_b(A, C);
  B2 = __msa_ave_u_b(B, B);
  R = __msa_aver_u_b(AC, B2);
  val3 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val2 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val1 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val0 = __msa_copy_s_w((v4i32)R, 0);
  SW4(val0, val1, val2, val3, dst, BPS);
}

static void LD4(uint8_t* dst) {   // Down-Left
  const uint8_t* const ptop = dst - BPS;
  uint32_t val0 = LW(ptop + 0);
  uint32_t val1 = LW(ptop + 4);
  uint32_t val2, val3;
  v16u8 A = { 0 }, B, C, AC, B2, R;

  INSERT_W2_UB(val0, val1, A);
  B = SLDI_UB(A, A, 1);
  C = SLDI_UB(A, A, 2);
  C = (v16u8)__msa_insert_b((v16i8)C, 6, ptop[7]);
  AC = __msa_ave_u_b(A, C);
  B2 = __msa_ave_u_b(B, B);
  R = __msa_aver_u_b(AC, B2);
  val0 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val1 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val2 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val3 = __msa_copy_s_w((v4i32)R, 0);
  SW4(val0, val1, val2, val3, dst, BPS);
}

// 16x16

static void DC16(uint8_t* dst) {   // DC
  uint32_t dc = 16;
  int i;
  const v16u8 rtop = LD_UB(dst - BPS);
  const v8u16 dctop = __msa_hadd_u_h(rtop, rtop);
  v16u8 out;

  for (i = 0; i < 16; ++i) {
    dc += dst[-1 + i * BPS];
  }
  dc += HADD_UH_U32(dctop);
  out = (v16u8)__msa_fill_b(dc >> 5);
  ST_UB8(out, out, out, out, out, out, out, out, dst, BPS);
  ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS);
}
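// TrueMotion prediction: pred(x, y) = clip255(Left[y] + Top[x] - TopLeft).
// TM16 below (like TM4/TM8uv) hoists the row-invariant part, computing
// d[x] = Top[x] - TopLeft once in 16-bit lanes; each output row is then just
// clip255(d[x] + Left[y]) followed by a pack back to bytes. Scalar sketch:
//
//   for (y = 0; y < 16; ++y)
//     for (x = 0; x < 16; ++x) dst[x + y * BPS] = clip255(d[x] + L[y]);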
static void TM16(uint8_t* dst) {
  int j;
  v8i16 d1, d2;
  const v16i8 zero = { 0 };
  const v8i16 TL = (v8i16)__msa_fill_h(dst[-1 - BPS]);
  const v16i8 T = LD_SB(dst - BPS);

  ILVRL_B2_SH(zero, T, d1, d2);
  SUB2(d1, TL, d2, TL, d1, d2);
  for (j = 0; j < 16; j += 4) {
    v16i8 t0, t1, t2, t3;
    v8i16 r0, r1, r2, r3, r4, r5, r6, r7;
    const v8i16 L0 = (v8i16)__msa_fill_h(dst[-1 + 0 * BPS]);
    const v8i16 L1 = (v8i16)__msa_fill_h(dst[-1 + 1 * BPS]);
    const v8i16 L2 = (v8i16)__msa_fill_h(dst[-1 + 2 * BPS]);
    const v8i16 L3 = (v8i16)__msa_fill_h(dst[-1 + 3 * BPS]);
    ADD4(d1, L0, d1, L1, d1, L2, d1, L3, r0, r1, r2, r3);
    ADD4(d2, L0, d2, L1, d2, L2, d2, L3, r4, r5, r6, r7);
    CLIP_SH4_0_255(r0, r1, r2, r3);
    CLIP_SH4_0_255(r4, r5, r6, r7);
    PCKEV_B4_SB(r4, r0, r5, r1, r6, r2, r7, r3, t0, t1, t2, t3);
    ST_SB4(t0, t1, t2, t3, dst, BPS);
    dst += 4 * BPS;
  }
}

static void VE16(uint8_t* dst) {   // vertical
  const v16u8 rtop = LD_UB(dst - BPS);
  ST_UB8(rtop, rtop, rtop, rtop, rtop, rtop, rtop, rtop, dst, BPS);
  ST_UB8(rtop, rtop, rtop, rtop, rtop, rtop, rtop, rtop, dst + 8 * BPS, BPS);
}

static void HE16(uint8_t* dst) {   // horizontal
  int j;
  for (j = 16; j > 0; j -= 4) {
    const v16u8 L0 = (v16u8)__msa_fill_b(dst[-1 + 0 * BPS]);
    const v16u8 L1 = (v16u8)__msa_fill_b(dst[-1 + 1 * BPS]);
    const v16u8 L2 = (v16u8)__msa_fill_b(dst[-1 + 2 * BPS]);
    const v16u8 L3 = (v16u8)__msa_fill_b(dst[-1 + 3 * BPS]);
    ST_UB4(L0, L1, L2, L3, dst, BPS);
    dst += 4 * BPS;
  }
}

static void DC16NoTop(uint8_t* dst) {   // DC with top samples not available
  int j;
  uint32_t dc = 8;
  v16u8 out;

  for (j = 0; j < 16; ++j) {
    dc += dst[-1 + j * BPS];
  }
  out = (v16u8)__msa_fill_b(dc >> 4);
  ST_UB8(out, out, out, out, out, out, out, out, dst, BPS);
  ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS);
}

static void DC16NoLeft(uint8_t* dst) {   // DC with left samples not available
  uint32_t dc = 8;
  const v16u8 rtop = LD_UB(dst - BPS);
  const v8u16 dctop = __msa_hadd_u_h(rtop, rtop);
  v16u8 out;

  dc += HADD_UH_U32(dctop);
  out = (v16u8)__msa_fill_b(dc >> 4);
  ST_UB8(out, out, out, out, out, out, out, out, dst, BPS);
  ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS);
}

static void DC16NoTopLeft(uint8_t* dst) {   // DC with nothing
  const v16u8 out = (v16u8)__msa_fill_b(0x80);
  ST_UB8(out, out, out, out, out, out, out, out, dst, BPS);
  ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS);
}
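// In every DC predictor the accumulator starts at half the sample count so
// that the final shift rounds to nearest: DC16 averages 32 samples (16 top
// + 16 left), so dc starts at 16 and ends with dc >> 5, while the
// NoTop/NoLeft variants average 16 samples (dc = 8, then >> 4). The 8x8
// chroma versions below follow the same pattern one shift down, and with no
// neighbors at all the prediction is the fixed mid-gray value 0x80.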
// Chroma

#define STORE8x8(out, dst) do {                 \
  SD4(out, out, out, out, dst + 0 * BPS, BPS);  \
  SD4(out, out, out, out, dst + 4 * BPS, BPS);  \
} while (0)

static void DC8uv(uint8_t* dst) {   // DC
  uint32_t dc = 8;
  int i;
  uint64_t out;
  const v16u8 rtop = LD_UB(dst - BPS);
  const v8u16 temp0 = __msa_hadd_u_h(rtop, rtop);
  const v4u32 temp1 = __msa_hadd_u_w(temp0, temp0);
  const v2u64 temp2 = __msa_hadd_u_d(temp1, temp1);
  v16u8 dctemp;

  for (i = 0; i < 8; ++i) {
    dc += dst[-1 + i * BPS];
  }
  dc += __msa_copy_s_w((v4i32)temp2, 0);
  dctemp = (v16u8)__msa_fill_b(dc >> 4);
  out = __msa_copy_s_d((v2i64)dctemp, 0);
  STORE8x8(out, dst);
}

static void TM8uv(uint8_t* dst) {
  int j;
  const v16i8 T1 = LD_SB(dst - BPS);
  const v16i8 zero = { 0 };
  const v8i16 T = (v8i16)__msa_ilvr_b(zero, T1);
  const v8i16 TL = (v8i16)__msa_fill_h(dst[-1 - BPS]);
  const v8i16 d = T - TL;

  for (j = 0; j < 8; j += 4) {
    v16i8 t0, t1;
    v8i16 r0 = (v8i16)__msa_fill_h(dst[-1 + 0 * BPS]);
    v8i16 r1 = (v8i16)__msa_fill_h(dst[-1 + 1 * BPS]);
    v8i16 r2 = (v8i16)__msa_fill_h(dst[-1 + 2 * BPS]);
    v8i16 r3 = (v8i16)__msa_fill_h(dst[-1 + 3 * BPS]);
    ADD4(d, r0, d, r1, d, r2, d, r3, r0, r1, r2, r3);
    CLIP_SH4_0_255(r0, r1, r2, r3);
    PCKEV_B2_SB(r1, r0, r3, r2, t0, t1);
    ST4x4_UB(t0, t1, 0, 2, 0, 2, dst, BPS);
    ST4x4_UB(t0, t1, 1, 3, 1, 3, dst + 4, BPS);
    dst += 4 * BPS;
  }
}

static void VE8uv(uint8_t* dst) {   // vertical
  const v16u8 rtop = LD_UB(dst - BPS);
  const uint64_t out = __msa_copy_s_d((v2i64)rtop, 0);
  STORE8x8(out, dst);
}

static void HE8uv(uint8_t* dst) {   // horizontal
  int j;
  for (j = 0; j < 8; j += 4) {
    const v16u8 L0 = (v16u8)__msa_fill_b(dst[-1 + 0 * BPS]);
    const v16u8 L1 = (v16u8)__msa_fill_b(dst[-1 + 1 * BPS]);
    const v16u8 L2 = (v16u8)__msa_fill_b(dst[-1 + 2 * BPS]);
    const v16u8 L3 = (v16u8)__msa_fill_b(dst[-1 + 3 * BPS]);
    const uint64_t out0 = __msa_copy_s_d((v2i64)L0, 0);
    const uint64_t out1 = __msa_copy_s_d((v2i64)L1, 0);
    const uint64_t out2 = __msa_copy_s_d((v2i64)L2, 0);
    const uint64_t out3 = __msa_copy_s_d((v2i64)L3, 0);
    SD4(out0, out1, out2, out3, dst, BPS);
    dst += 4 * BPS;
  }
}

static void DC8uvNoLeft(uint8_t* dst) {   // DC with no left samples
  const uint32_t dc = 4;
  const v16u8 rtop = LD_UB(dst - BPS);
  const v8u16 temp0 = __msa_hadd_u_h(rtop, rtop);
  const v4u32 temp1 = __msa_hadd_u_w(temp0, temp0);
  const v2u64 temp2 = __msa_hadd_u_d(temp1, temp1);
  const uint32_t sum_m = __msa_copy_s_w((v4i32)temp2, 0);
  const v16u8 dcval = (v16u8)__msa_fill_b((dc + sum_m) >> 3);
  const uint64_t out = __msa_copy_s_d((v2i64)dcval, 0);
  STORE8x8(out, dst);
}

static void DC8uvNoTop(uint8_t* dst) {   // DC with no top samples
  uint32_t dc = 4;
  int i;
  uint64_t out;
  v16u8 dctemp;

  for (i = 0; i < 8; ++i) {
    dc += dst[-1 + i * BPS];
  }
  dctemp = (v16u8)__msa_fill_b(dc >> 3);
  out = __msa_copy_s_d((v2i64)dctemp, 0);
  STORE8x8(out, dst);
}

static void DC8uvNoTopLeft(uint8_t* dst) {   // DC with nothing
  const uint64_t out = 0x8080808080808080ULL;
  STORE8x8(out, dst);
}

//------------------------------------------------------------------------------
// Entry point

extern void VP8DspInitMSA(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitMSA(void) {
  VP8TransformWHT = TransformWHT;
  VP8Transform = TransformTwo;
  VP8TransformDC = TransformDC;
  VP8TransformAC3 = TransformAC3;

  VP8VFilter16 = VFilter16;
  VP8HFilter16 = HFilter16;
  VP8VFilter16i = VFilter16i;
  VP8HFilter16i = HFilter16i;
  VP8VFilter8 = VFilter8;
  VP8HFilter8 = HFilter8;
  VP8VFilter8i = VFilter8i;
  VP8HFilter8i = HFilter8i;
  VP8SimpleVFilter16 = SimpleVFilter16;
  VP8SimpleHFilter16 = SimpleHFilter16;
  VP8SimpleVFilter16i = SimpleVFilter16i;
  VP8SimpleHFilter16i = SimpleHFilter16i;

  VP8PredLuma4[0] = DC4;
  VP8PredLuma4[1] = TM4;
  VP8PredLuma4[2] = VE4;
  VP8PredLuma4[4] = RD4;
  VP8PredLuma4[6] = LD4;
  VP8PredLuma16[0] = DC16;
  VP8PredLuma16[1] = TM16;
  VP8PredLuma16[2] = VE16;
  VP8PredLuma16[3] = HE16;
  VP8PredLuma16[4] = DC16NoTop;
  VP8PredLuma16[5] = DC16NoLeft;
  VP8PredLuma16[6] = DC16NoTopLeft;
  VP8PredChroma8[0] = DC8uv;
  VP8PredChroma8[1] = TM8uv;
  VP8PredChroma8[2] = VE8uv;
  VP8PredChroma8[3] = HE8uv;
  VP8PredChroma8[4] = DC8uvNoTop;
  VP8PredChroma8[5] = DC8uvNoLeft;
  VP8PredChroma8[6] = DC8uvNoTopLeft;
}

#else  // !WEBP_USE_MSA

WEBP_DSP_INIT_STUB(VP8DspInitMSA)

#endif  // WEBP_USE_MSA