upsampling_sse41.c (13892B)
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE41 version of YUV to RGB upsampling functions.
//
// Author: somnath@google.com (Somnath Banerjee)

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_SSE41)
#include <smmintrin.h>

#include <assert.h>
#include <string.h>

#include "src/webp/types.h"
#include "src/dsp/cpu.h"
#include "src/dsp/yuv.h"
#include "src/webp/decode.h"

#ifdef FANCY_UPSAMPLING

#if !defined(WEBP_REDUCE_CSP)

// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
// u = (9*a + 3*b + 3*c + d + 8) / 16
//   = (a + (a + 3*b + 3*c + d) / 8 + 1) / 2
//   = (a + m + 1) / 2
// where m = (a + 3*b + 3*c + d) / 8
//         = ((a + b + c + d) / 2 + b + c) / 4
//
// Let's say  k = (a + b + c + d) / 4.
// We can compute k as
// k = (s + t + 1) / 2 - ((a^d) | (b^c) | (s^t)) & 1
// where s = (a + d + 1) / 2 and t = (b + c + 1) / 2
//
// Then m can be written as
// m = (k + t + 1) / 2 - (((b^c) & (s^t)) | (k^t)) & 1
//
// The "& 1" terms above compensate for the upward rounding bias of
// _mm_avg_epu8 (which computes (x + y + 1) >> 1 per byte), so the final
// results are exact.

// Computes out = (k + in + 1) / 2 - ((ij & (s^t)) | (k^in)) & 1
// NOTE: this macro reads the __m128i locals 'k', 'st' and 'one' from the
// expansion site (they are defined inside UPSAMPLE_32PIXELS below).
#define GET_M(ij, in, out) do {                                                \
  const __m128i tmp0 = _mm_avg_epu8(k, (in));     /* (k + in + 1) / 2 */       \
  const __m128i tmp1 = _mm_and_si128((ij), st);   /* (ij) & (s^t) */           \
  const __m128i tmp2 = _mm_xor_si128(k, (in));    /* (k^in) */                 \
  const __m128i tmp3 = _mm_or_si128(tmp1, tmp2);  /* ((ij) & (s^t)) | (k^in) */\
  const __m128i tmp4 = _mm_and_si128(tmp3, one);  /* & 1 -> lsb_correction */  \
  (out) = _mm_sub_epi8(tmp0, tmp4);    /* (k + in + 1) / 2 - lsb_correction */ \
} while (0)

// pack and store two alternating pixel rows.
// Interleaves the bytes of the two averaged vectors and writes 32 bytes with
// aligned stores: (out) must be 16-byte aligned.
#define PACK_AND_STORE(a, b, da, db, out) do {                                 \
  const __m128i t_a = _mm_avg_epu8(a, da);  /* (9a + 3b + 3c + d + 8) / 16 */  \
  const __m128i t_b = _mm_avg_epu8(b, db);  /* (3a + 9b + c + 3d + 8) / 16 */  \
  const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b);                             \
  const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b);                             \
  _mm_store_si128(((__m128i*)(out)) + 0, t_1);                                 \
  _mm_store_si128(((__m128i*)(out)) + 1, t_2);                                 \
} while (0)

// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.
// Upsamples one 2x16 block of chroma samples into 2x32 output samples.
// Reads 17 bytes from each input row (vector 'a' covers bytes [0..15] and
// 'b' covers bytes [1..16]); writes the top output row at (out)[0..31] and
// the bottom output row at (out)[64..95].  'out' must be 16-byte aligned
// (PACK_AND_STORE uses aligned stores).
#define UPSAMPLE_32PIXELS(r1, r2, out) do {                                    \
  const __m128i one = _mm_set1_epi8(1);                                        \
  const __m128i a = _mm_loadu_si128((const __m128i*)&(r1)[0]);                 \
  const __m128i b = _mm_loadu_si128((const __m128i*)&(r1)[1]);                 \
  const __m128i c = _mm_loadu_si128((const __m128i*)&(r2)[0]);                 \
  const __m128i d = _mm_loadu_si128((const __m128i*)&(r2)[1]);                 \
                                                                               \
  const __m128i s = _mm_avg_epu8(a, d);        /* s = (a + d + 1) / 2 */       \
  const __m128i t = _mm_avg_epu8(b, c);        /* t = (b + c + 1) / 2 */       \
  const __m128i st = _mm_xor_si128(s, t);      /* st = s^t */                  \
                                                                               \
  const __m128i ad = _mm_xor_si128(a, d);      /* ad = a^d */                  \
  const __m128i bc = _mm_xor_si128(b, c);      /* bc = b^c */                  \
                                                                               \
  const __m128i t1 = _mm_or_si128(ad, bc);     /* (a^d) | (b^c) */             \
  const __m128i t2 = _mm_or_si128(t1, st);     /* (a^d) | (b^c) | (s^t) */     \
  const __m128i t3 = _mm_and_si128(t2, one);   /* (a^d) | (b^c) | (s^t) & 1 */ \
  const __m128i t4 = _mm_avg_epu8(s, t);                                       \
  const __m128i k = _mm_sub_epi8(t4, t3);      /* k = (a + b + c + d) / 4 */   \
  __m128i diag1, diag2;                                                        \
                                                                               \
  GET_M(bc, t, diag1);                  /* diag1 = (a + 3b + 3c + d) / 8 */    \
  GET_M(ad, s, diag2);                  /* diag2 = (3a + b + c + 3d) / 8 */    \
                                                                               \
  /* pack the alternate pixels */                                              \
  PACK_AND_STORE(a, b, diag1, diag2, (out) + 0);       /* store top */         \
  PACK_AND_STORE(c, d, diag2, diag1, (out) + 2 * 32);  /* store bottom */      \
} while (0)

// Turn the macro into a function for reducing code-size when non-critical
static void Upsample32Pixels_SSE41(const uint8_t* WEBP_RESTRICT const r1,
                                   const uint8_t* WEBP_RESTRICT const r2,
                                   uint8_t* WEBP_RESTRICT const out) {
  UPSAMPLE_32PIXELS(r1, r2, out);
}

// Handles the final partial block: copies the 'num_pixels' remaining chroma
// samples into 17-byte stack buffers, pads them by replicating the last byte
// (so the 17-sample read contract of UPSAMPLE_32PIXELS is satisfied), then
// upsamples.  Requires 1 <= num_pixels <= 17.
#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) {                         \
  uint8_t r1[17], r2[17];                                                      \
  memcpy(r1, (tb), (num_pixels));                                              \
  memcpy(r2, (bb), (num_pixels));                                              \
  /* replicate last byte */                                                    \
  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 17 - (num_pixels));          \
  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 17 - (num_pixels));          \
  /* using the shared function instead of the macro saves ~3k code size */     \
  Upsample32Pixels_SSE41(r1, r2, out);                                         \
}

// Converts 32 luma pixels (plus the cached chroma in r_u/r_v) to RGB for the
// top row, and for the bottom row when present.  The bottom-row chroma lives
// at r_u + 64 / r_v + 64 (see the UPSAMPLE_32PIXELS output layout).
// Reads the locals 'r_u' and 'r_v' from the expansion site.
#define CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y,                           \
                       top_dst, bottom_dst, cur_x) do {                        \
  FUNC##32_SSE41((top_y) + (cur_x), r_u, r_v, (top_dst) + (cur_x) * (XSTEP));  \
  if ((bottom_y) != NULL) {                                                    \
    FUNC##32_SSE41((bottom_y) + (cur_x), r_u + 64, r_v + 64,                   \
                   (bottom_dst) + (cur_x) * (XSTEP));                          \
  }                                                                            \
} while (0)

// Defines a full line-pair upsampler: pixel 0 is handled in scalar code, then
// 32-pixel SIMD blocks, then a padded last block whose output is staged in
// scratch buffers and memcpy'd out (so the aligned stores never overrun dst).
#define SSE4_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP)                             \
static void FUNC_NAME(const uint8_t* WEBP_RESTRICT top_y,                      \
                      const uint8_t* WEBP_RESTRICT bottom_y,                   \
                      const uint8_t* WEBP_RESTRICT top_u,                      \
                      const uint8_t* WEBP_RESTRICT top_v,                      \
                      const uint8_t* WEBP_RESTRICT cur_u,                      \
                      const uint8_t* WEBP_RESTRICT cur_v,                      \
                      uint8_t* WEBP_RESTRICT top_dst,                          \
                      uint8_t* WEBP_RESTRICT bottom_dst, int len) {            \
  int uv_pos, pos;                                                             \
  /* 16byte-aligned array to cache reconstructed u and v */                    \
  /* Layout (32-byte units): r_u top | r_v top | r_u bottom | r_v bottom |  */ \
  /* tmp_top_dst (4) | tmp_bottom_dst (4) | tmp_top | tmp_bottom = 14 units */ \
  uint8_t uv_buf[14 * 32 + 15] = { 0 };                                        \
  uint8_t* const r_u = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~(uintptr_t)15);  \
  uint8_t* const r_v = r_u + 32;                                               \
                                                                               \
  assert(top_y != NULL);                                                       \
  {   /* Treat the first pixel in regular way */                               \
    const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1;                       \
    const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1;                       \
    const int u0_t = (top_u[0] + u_diag) >> 1;                                 \
    const int v0_t = (top_v[0] + v_diag) >> 1;                                 \
    FUNC(top_y[0], u0_t, v0_t, top_dst);                                       \
    if (bottom_y != NULL) {                                                    \
      const int u0_b = (cur_u[0] + u_diag) >> 1;                               \
      const int v0_b = (cur_v[0] + v_diag) >> 1;                               \
      FUNC(bottom_y[0], u0_b, v0_b, bottom_dst);                               \
    }                                                                          \
  }                                                                            \
  /* For UPSAMPLE_32PIXELS, 17 u/v values must be read-able for each block */  \
  for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) {    \
    UPSAMPLE_32PIXELS(top_u + uv_pos, cur_u + uv_pos, r_u);                    \
    UPSAMPLE_32PIXELS(top_v + uv_pos, cur_v + uv_pos, r_v);                    \
    CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, pos);    \
  }                                                                            \
  if (len > 1) {                                                               \
    const int left_over = ((len + 1) >> 1) - (pos >> 1);                       \
    uint8_t* const tmp_top_dst = r_u + 4 * 32;                                 \
    uint8_t* const tmp_bottom_dst = tmp_top_dst + 4 * 32;                      \
    uint8_t* const tmp_top = tmp_bottom_dst + 4 * 32;                          \
    uint8_t* const tmp_bottom = (bottom_y == NULL) ? NULL : tmp_top + 32;      \
    assert(left_over > 0);                                                     \
    UPSAMPLE_LAST_BLOCK(top_u + uv_pos, cur_u + uv_pos, left_over, r_u);       \
    UPSAMPLE_LAST_BLOCK(top_v + uv_pos, cur_v + uv_pos, left_over, r_v);       \
    memcpy(tmp_top, top_y + pos, len - pos);                                   \
    if (bottom_y != NULL) memcpy(tmp_bottom, bottom_y + pos, len - pos);       \
    CONVERT2RGB_32(FUNC, XSTEP, tmp_top, tmp_bottom, tmp_top_dst,              \
                   tmp_bottom_dst, 0);                                         \
    /* copy only the valid tail pixels back into the real destination */       \
    memcpy(top_dst + pos * (XSTEP), tmp_top_dst, (len - pos) * (XSTEP));       \
    if (bottom_y != NULL) {                                                    \
      memcpy(bottom_dst + pos * (XSTEP), tmp_bottom_dst,                       \
             (len - pos) * (XSTEP));                                           \
    }                                                                          \
  }                                                                            \
}

// SSE4 variants of the fancy upsampler.
SSE4_UPSAMPLE_FUNC(UpsampleRgbLinePair_SSE41, VP8YuvToRgb, 3)
SSE4_UPSAMPLE_FUNC(UpsampleBgrLinePair_SSE41, VP8YuvToBgr, 3)

#undef GET_M
#undef PACK_AND_STORE
#undef UPSAMPLE_32PIXELS
#undef UPSAMPLE_LAST_BLOCK
// NOTE(review): CONVERT2RGB is never defined in this file (only
// CONVERT2RGB_32); this #undef looks like a leftover but is harmless.
#undef CONVERT2RGB
#undef CONVERT2RGB_32
#undef SSE4_UPSAMPLE_FUNC

#endif  // WEBP_REDUCE_CSP

//------------------------------------------------------------------------------
// Entry point

extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];

extern void WebPInitUpsamplersSSE41(void);

// Installs the SSE4.1 fancy-upsampling line-pair functions into the global
// WebPUpsamplers dispatch table (RGB/BGR only; skipped under
// WEBP_REDUCE_CSP).
WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersSSE41(void) {
#if !defined(WEBP_REDUCE_CSP)
  WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair_SSE41;
  WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair_SSE41;
#endif   // WEBP_REDUCE_CSP
}

#endif  // FANCY_UPSAMPLING

//------------------------------------------------------------------------------

extern WebPYUV444Converter WebPYUV444Converters[/* MODE_LAST */];
extern void WebPInitYUV444ConvertersSSE41(void);

// Defines a YUV444->RGB row converter: full 32-pixel batches go through the
// SSE4.1 kernel CALL, the remaining (len & 31) tail pixels fall back to the
// plain-C converter CALL_C.
#define YUV444_FUNC(FUNC_NAME, CALL, CALL_C, XSTEP)                            \
extern void CALL_C(const uint8_t* WEBP_RESTRICT y,                             \
                   const uint8_t* WEBP_RESTRICT u,                             \
                   const uint8_t* WEBP_RESTRICT v,                             \
                   uint8_t* WEBP_RESTRICT dst, int len);                       \
static void FUNC_NAME(const uint8_t* WEBP_RESTRICT y,                          \
                      const uint8_t* WEBP_RESTRICT u,                          \
                      const uint8_t* WEBP_RESTRICT v,                          \
                      uint8_t* WEBP_RESTRICT dst, int len) {                   \
  int i;                                                                       \
  const int max_len = len & ~31;                                               \
  for (i = 0; i < max_len; i += 32) {                                          \
    CALL(y + i, u + i, v + i, dst + i * (XSTEP));                              \
  }                                                                            \
  if (i < len) {  /* C-fallback */                                             \
    CALL_C(y + i, u + i, v + i, dst + i * (XSTEP), len - i);                   \
  }                                                                            \
}

#if !defined(WEBP_REDUCE_CSP)
YUV444_FUNC(Yuv444ToRgb_SSE41, VP8YuvToRgb32_SSE41, WebPYuv444ToRgb_C, 3)
YUV444_FUNC(Yuv444ToBgr_SSE41, VP8YuvToBgr32_SSE41, WebPYuv444ToBgr_C, 3)
#endif   // WEBP_REDUCE_CSP

// Installs the SSE4.1 YUV444 converters into the global dispatch table.
WEBP_TSAN_IGNORE_FUNCTION void WebPInitYUV444ConvertersSSE41(void) {
#if !defined(WEBP_REDUCE_CSP)
  WebPYUV444Converters[MODE_RGB] = Yuv444ToRgb_SSE41;
  WebPYUV444Converters[MODE_BGR] = Yuv444ToBgr_SSE41;
#endif   // WEBP_REDUCE_CSP
}

#else

// No SSE4.1 support: provide a no-op init stub so callers can link.
WEBP_DSP_INIT_STUB(WebPInitYUV444ConvertersSSE41)

#endif  // WEBP_USE_SSE41

#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_SSE41))
WEBP_DSP_INIT_STUB(WebPInitUpsamplersSSE41)
#endif