;
; jidctfst.asm - fast integer IDCT (64-bit SSE2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, 2016, 2024, D. R. Commander.
; Copyright (C) 2018, Matthias Räncker.
; Copyright (C) 2023, Aliaksiej Kandracienka.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) or Yasm.
;
; This file contains a fast, not so accurate integer implementation of
; the inverse DCT (Discrete Cosine Transform).  The following code is
; based directly on the IJG's original jidctfst.c; see the jidctfst.c
; for more details.

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------

%define CONST_BITS  8  ; 14 is also OK.
%define PASS1_BITS  2

%if IFAST_SCALE_BITS != PASS1_BITS
%error "'IFAST_SCALE_BITS' must be equal to 'PASS1_BITS'."
%endif

%if CONST_BITS == 8
F_1_082 equ 277              ; FIX(1.082392200)
F_1_414 equ 362              ; FIX(1.414213562)
F_1_847 equ 473              ; FIX(1.847759065)
F_2_613 equ 669              ; FIX(2.613125930)
F_1_613 equ (F_2_613 - 256)  ; FIX(2.613125930) - FIX(1)
%else
; NASM cannot do compile-time arithmetic on floating-point constants.
%define DESCALE(x, n)  (((x) + (1 << ((n) - 1))) >> (n))
F_1_082 equ DESCALE(1162209775, 30 - CONST_BITS)  ; FIX(1.082392200)
F_1_414 equ DESCALE(1518500249, 30 - CONST_BITS)  ; FIX(1.414213562)
F_1_847 equ DESCALE(1984016188, 30 - CONST_BITS)  ; FIX(1.847759065)
F_2_613 equ DESCALE(2805822602, 30 - CONST_BITS)  ; FIX(2.613125930)
F_1_613 equ (F_2_613 - (1 << CONST_BITS))         ; FIX(2.613125930) - FIX(1)
%endif

; --------------------------------------------------------------------------
    SECTION     SEG_CONST

; PRE_MULTIPLY_SCALE_BITS <= 2 (to avoid overflow)
; CONST_BITS + CONST_SHIFT + PRE_MULTIPLY_SCALE_BITS == 16 (for pmulhw)

%define PRE_MULTIPLY_SCALE_BITS  2
%define CONST_SHIFT              (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS)

    ALIGNZ      32
    GLOBAL_DATA(jconst_idct_ifast_sse2)

EXTN(jconst_idct_ifast_sse2):

PW_F1414       times 8  dw  F_1_414 << CONST_SHIFT
PW_F1847       times 8  dw  F_1_847 << CONST_SHIFT
PW_MF1613      times 8  dw -F_1_613 << CONST_SHIFT
PW_F1082       times 8  dw  F_1_082 << CONST_SHIFT
PB_CENTERJSAMP times 16 db  CENTERJSAMPLE

    ALIGNZ      32

; --------------------------------------------------------------------------
    SECTION     SEG_TEXT
    BITS        64
;
; Perform dequantization and inverse DCT on one block of coefficients.
;
; GLOBAL(void)
; jsimd_idct_ifast_sse2(void *dct_table, JCOEFPTR coef_block,
;                       JSAMPARRAY output_buf, JDIMENSION output_col)
;

; r10 = jpeg_component_info *compptr
; r11 = JCOEFPTR coef_block
; r12 = JSAMPARRAY output_buf
; r13d = JDIMENSION output_col

%define wk(i)   r15 - (WK_NUM - (i)) * SIZEOF_XMMWORD
                                        ; xmmword wk[WK_NUM]
%define WK_NUM  2

    align       32
    GLOBAL_FUNCTION(jsimd_idct_ifast_sse2)

EXTN(jsimd_idct_ifast_sse2):
    ENDBR64
    push        rbp
    mov         rbp, rsp
    push        r15
    and         rsp, byte (-SIZEOF_XMMWORD)  ; align to 128 bits
    ; Allocate stack space for wk array.  r15 is used to access it.
    mov         r15, rsp
    sub         rsp, byte (SIZEOF_XMMWORD * WK_NUM)
    COLLECT_ARGS 4

    ; ---- Pass 1: process columns from input.
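
    ; Note on the fixed-point multiplies used in both passes: pmulhw keeps
    ; only the high 16 bits of the signed 32-bit product, i.e. (a * b) >> 16.
    ; Each operand is pre-shifted left by PRE_MULTIPLY_SCALE_BITS (2) and the
    ; constant tables above are stored pre-shifted by CONST_SHIFT (6), so
    ;   ((x << 2) * (F << 6)) >> 16 = (x * F) >> CONST_BITS
    ; which is the fixed-point product x * F/2^CONST_BITS.  For example,
    ; PW_F1414 holds 362 << 6 = 23168, and the psllw/pmulhw pair yields
    ; approximately x * 1.414 (x * sqrt(2)).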

    mov         rdx, r10                ; quantptr
    mov         rsi, r11                ; inptr

%ifndef NO_ZERO_COLUMN_TEST_IFAST_SSE2
    mov         eax, dword [DWBLOCK(1,0,rsi,SIZEOF_JCOEF)]
    or          eax, dword [DWBLOCK(2,0,rsi,SIZEOF_JCOEF)]
    jnz         near .columnDCT

    movdqa      xmm0, XMMWORD [XMMBLOCK(1,0,rsi,SIZEOF_JCOEF)]
    movdqa      xmm1, XMMWORD [XMMBLOCK(2,0,rsi,SIZEOF_JCOEF)]
    por         xmm0, XMMWORD [XMMBLOCK(3,0,rsi,SIZEOF_JCOEF)]
    por         xmm1, XMMWORD [XMMBLOCK(4,0,rsi,SIZEOF_JCOEF)]
    por         xmm0, XMMWORD [XMMBLOCK(5,0,rsi,SIZEOF_JCOEF)]
    por         xmm1, XMMWORD [XMMBLOCK(6,0,rsi,SIZEOF_JCOEF)]
    por         xmm0, XMMWORD [XMMBLOCK(7,0,rsi,SIZEOF_JCOEF)]
    por         xmm1, xmm0
    packsswb    xmm1, xmm1
    packsswb    xmm1, xmm1
    movd        eax, xmm1
    test        rax, rax
    jnz         short .columnDCT

    ; -- AC terms all zero

    movdqa      xmm0, XMMWORD [XMMBLOCK(0,0,rsi,SIZEOF_JCOEF)]
    pmullw      xmm0, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_IFAST_MULT_TYPE)]

    movdqa      xmm7, xmm0              ; xmm0=in0=(00 01 02 03 04 05 06 07)
    punpcklwd   xmm0, xmm0              ; xmm0=(00 00 01 01 02 02 03 03)
    punpckhwd   xmm7, xmm7              ; xmm7=(04 04 05 05 06 06 07 07)

    pshufd      xmm6, xmm0, 0x00        ; xmm6=col0=(00 00 00 00 00 00 00 00)
    pshufd      xmm2, xmm0, 0x55        ; xmm2=col1=(01 01 01 01 01 01 01 01)
    pshufd      xmm5, xmm0, 0xAA        ; xmm5=col2=(02 02 02 02 02 02 02 02)
    pshufd      xmm0, xmm0, 0xFF        ; xmm0=col3=(03 03 03 03 03 03 03 03)
    pshufd      xmm1, xmm7, 0x00        ; xmm1=col4=(04 04 04 04 04 04 04 04)
    pshufd      xmm4, xmm7, 0x55        ; xmm4=col5=(05 05 05 05 05 05 05 05)
    pshufd      xmm3, xmm7, 0xAA        ; xmm3=col6=(06 06 06 06 06 06 06 06)
    pshufd      xmm7, xmm7, 0xFF        ; xmm7=col7=(07 07 07 07 07 07 07 07)

    movdqa      XMMWORD [wk(0)], xmm2   ; wk(0)=col1
    movdqa      XMMWORD [wk(1)], xmm0   ; wk(1)=col3
    jmp         near .column_end
%endif
.columnDCT:

    ; -- Even part

    movdqa      xmm0, XMMWORD [XMMBLOCK(0,0,rsi,SIZEOF_JCOEF)]
    movdqa      xmm1, XMMWORD [XMMBLOCK(2,0,rsi,SIZEOF_JCOEF)]
    pmullw      xmm0, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
    pmullw      xmm1, XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
    movdqa      xmm2, XMMWORD [XMMBLOCK(4,0,rsi,SIZEOF_JCOEF)]
    movdqa      xmm3, XMMWORD [XMMBLOCK(6,0,rsi,SIZEOF_JCOEF)]
    pmullw      xmm2, XMMWORD [XMMBLOCK(4,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
    pmullw      xmm3, XMMWORD [XMMBLOCK(6,0,rdx,SIZEOF_IFAST_MULT_TYPE)]

    movdqa      xmm4, xmm0
    movdqa      xmm5, xmm1
    psubw       xmm0, xmm2              ; xmm0=tmp11
    psubw       xmm1, xmm3
    paddw       xmm4, xmm2              ; xmm4=tmp10
    paddw       xmm5, xmm3              ; xmm5=tmp13

    psllw       xmm1, PRE_MULTIPLY_SCALE_BITS
    pmulhw      xmm1, [rel PW_F1414]
    psubw       xmm1, xmm5              ; xmm1=tmp12

    movdqa      xmm6, xmm4
    movdqa      xmm7, xmm0
    psubw       xmm4, xmm5              ; xmm4=tmp3
    psubw       xmm0, xmm1              ; xmm0=tmp2
    paddw       xmm6, xmm5              ; xmm6=tmp0
    paddw       xmm7, xmm1              ; xmm7=tmp1

    movdqa      XMMWORD [wk(1)], xmm4   ; wk(1)=tmp3
    movdqa      XMMWORD [wk(0)], xmm0   ; wk(0)=tmp2

    ; -- Odd part

    movdqa      xmm2, XMMWORD [XMMBLOCK(1,0,rsi,SIZEOF_JCOEF)]
    movdqa      xmm3, XMMWORD [XMMBLOCK(3,0,rsi,SIZEOF_JCOEF)]
    pmullw      xmm2, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
    pmullw      xmm3, XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
    movdqa      xmm5, XMMWORD [XMMBLOCK(5,0,rsi,SIZEOF_JCOEF)]
    movdqa      xmm1, XMMWORD [XMMBLOCK(7,0,rsi,SIZEOF_JCOEF)]
    pmullw      xmm5, XMMWORD [XMMBLOCK(5,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
    pmullw      xmm1, XMMWORD [XMMBLOCK(7,0,rdx,SIZEOF_IFAST_MULT_TYPE)]

    movdqa      xmm4, xmm2
    movdqa      xmm0, xmm5
    psubw       xmm2, xmm1              ; xmm2=z12
    psubw       xmm5, xmm3              ; xmm5=z10
    paddw       xmm4, xmm1              ; xmm4=z11
    paddw       xmm0, xmm3              ; xmm0=z13

    movdqa      xmm1, xmm5              ; xmm1=z10(unscaled)
    psllw       xmm2, PRE_MULTIPLY_SCALE_BITS
    psllw       xmm5, PRE_MULTIPLY_SCALE_BITS

    movdqa      xmm3, xmm4
    psubw       xmm4, xmm0
    paddw       xmm3, xmm0              ; xmm3=tmp7

    psllw       xmm4, PRE_MULTIPLY_SCALE_BITS
    pmulhw      xmm4, [rel PW_F1414]    ; xmm4=tmp11

    ; To avoid overflow...
    ;
    ; (Original)
    ; tmp12 = -2.613125930 * z10 + z5;
    ;
    ; (This implementation)
    ; tmp12 = (-1.613125930 - 1) * z10 + z5;
    ;       = -1.613125930 * z10 - z10 + z5;

    movdqa      xmm0, xmm5
    paddw       xmm5, xmm2
    pmulhw      xmm5, [rel PW_F1847]    ; xmm5=z5
    pmulhw      xmm0, [rel PW_MF1613]
    pmulhw      xmm2, [rel PW_F1082]
    psubw       xmm0, xmm1
    psubw       xmm2, xmm5              ; xmm2=tmp10
    paddw       xmm0, xmm5              ; xmm0=tmp12

    ; -- Final output stage

    psubw       xmm0, xmm3              ; xmm0=tmp6
    movdqa      xmm1, xmm6
    movdqa      xmm5, xmm7
    paddw       xmm6, xmm3              ; xmm6=data0=(00 01 02 03 04 05 06 07)
    paddw       xmm7, xmm0              ; xmm7=data1=(10 11 12 13 14 15 16 17)
    psubw       xmm1, xmm3              ; xmm1=data7=(70 71 72 73 74 75 76 77)
    psubw       xmm5, xmm0              ; xmm5=data6=(60 61 62 63 64 65 66 67)
    psubw       xmm4, xmm0              ; xmm4=tmp5

    movdqa      xmm3, xmm6              ; transpose coefficients(phase 1)
    punpcklwd   xmm6, xmm7              ; xmm6=(00 10 01 11 02 12 03 13)
    punpckhwd   xmm3, xmm7              ; xmm3=(04 14 05 15 06 16 07 17)
    movdqa      xmm0, xmm5              ; transpose coefficients(phase 1)
    punpcklwd   xmm5, xmm1              ; xmm5=(60 70 61 71 62 72 63 73)
    punpckhwd   xmm0, xmm1              ; xmm0=(64 74 65 75 66 76 67 77)

    movdqa      xmm7, XMMWORD [wk(0)]   ; xmm7=tmp2
    movdqa      xmm1, XMMWORD [wk(1)]   ; xmm1=tmp3

    movdqa      XMMWORD [wk(0)], xmm5   ; wk(0)=(60 70 61 71 62 72 63 73)
    movdqa      XMMWORD [wk(1)], xmm0   ; wk(1)=(64 74 65 75 66 76 67 77)

    paddw       xmm2, xmm4              ; xmm2=tmp4
    movdqa      xmm5, xmm7
    movdqa      xmm0, xmm1
    paddw       xmm7, xmm4              ; xmm7=data2=(20 21 22 23 24 25 26 27)
    paddw       xmm1, xmm2              ; xmm1=data4=(40 41 42 43 44 45 46 47)
    psubw       xmm5, xmm4              ; xmm5=data5=(50 51 52 53 54 55 56 57)
    psubw       xmm0, xmm2              ; xmm0=data3=(30 31 32 33 34 35 36 37)

    movdqa      xmm4, xmm7              ; transpose coefficients(phase 1)
    punpcklwd   xmm7, xmm0              ; xmm7=(20 30 21 31 22 32 23 33)
    punpckhwd   xmm4, xmm0              ; xmm4=(24 34 25 35 26 36 27 37)
    movdqa      xmm2, xmm1              ; transpose coefficients(phase 1)
    punpcklwd   xmm1, xmm5              ; xmm1=(40 50 41 51 42 52 43 53)
    punpckhwd   xmm2, xmm5              ; xmm2=(44 54 45 55 46 56 47 57)

    movdqa      xmm0, xmm3              ; transpose coefficients(phase 2)
    punpckldq   xmm3, xmm4              ; xmm3=(04 14 24 34 05 15 25 35)
    punpckhdq   xmm0, xmm4              ; xmm0=(06 16 26 36 07 17 27 37)
    movdqa      xmm5, xmm6              ; transpose coefficients(phase 2)
    punpckldq   xmm6, xmm7              ; xmm6=(00 10 20 30 01 11 21 31)
    punpckhdq   xmm5, xmm7              ; xmm5=(02 12 22 32 03 13 23 33)

    movdqa      xmm4, XMMWORD [wk(0)]   ; xmm4=(60 70 61 71 62 72 63 73)
    movdqa      xmm7, XMMWORD [wk(1)]   ; xmm7=(64 74 65 75 66 76 67 77)

    movdqa      XMMWORD [wk(0)], xmm3   ; wk(0)=(04 14 24 34 05 15 25 35)
    movdqa      XMMWORD [wk(1)], xmm0   ; wk(1)=(06 16 26 36 07 17 27 37)

    movdqa      xmm3, xmm1              ; transpose coefficients(phase 2)
    punpckldq   xmm1, xmm4              ; xmm1=(40 50 60 70 41 51 61 71)
    punpckhdq   xmm3, xmm4              ; xmm3=(42 52 62 72 43 53 63 73)
    movdqa      xmm0, xmm2              ; transpose coefficients(phase 2)
    punpckldq   xmm2, xmm7              ; xmm2=(44 54 64 74 45 55 65 75)
    punpckhdq   xmm0, xmm7              ; xmm0=(46 56 66 76 47 57 67 77)

    movdqa      xmm4, xmm6              ; transpose coefficients(phase 3)
    punpcklqdq  xmm6, xmm1              ; xmm6=col0=(00 10 20 30 40 50 60 70)
    punpckhqdq  xmm4, xmm1              ; xmm4=col1=(01 11 21 31 41 51 61 71)
    movdqa      xmm7, xmm5              ; transpose coefficients(phase 3)
    punpcklqdq  xmm5, xmm3              ; xmm5=col2=(02 12 22 32 42 52 62 72)
    punpckhqdq  xmm7, xmm3              ; xmm7=col3=(03 13 23 33 43 53 63 73)

    movdqa      xmm1, XMMWORD [wk(0)]   ; xmm1=(04 14 24 34 05 15 25 35)
    movdqa      xmm3, XMMWORD [wk(1)]   ; xmm3=(06 16 26 36 07 17 27 37)

    movdqa      XMMWORD [wk(0)], xmm4   ; wk(0)=col1
    movdqa      XMMWORD [wk(1)], xmm7   ; wk(1)=col3

    movdqa      xmm4, xmm1              ; transpose coefficients(phase 3)
    punpcklqdq  xmm1, xmm2              ; xmm1=col4=(04 14 24 34 44 54 64 74)
    punpckhqdq  xmm4, xmm2              ; xmm4=col5=(05 15 25 35 45 55 65 75)
    movdqa      xmm7, xmm3              ; transpose coefficients(phase 3)
    punpcklqdq  xmm3, xmm0              ; xmm3=col6=(06 16 26 36 46 56 66 76)
    punpckhqdq  xmm7, xmm0              ; xmm7=col7=(07 17 27 37 47 57 67 77)
.column_end:

    ; -- Prefetch the next coefficient block

    prefetchnta [rsi + DCTSIZE2*SIZEOF_JCOEF + 0*32]
    prefetchnta [rsi + DCTSIZE2*SIZEOF_JCOEF + 1*32]
    prefetchnta [rsi + DCTSIZE2*SIZEOF_JCOEF + 2*32]
    prefetchnta [rsi + DCTSIZE2*SIZEOF_JCOEF + 3*32]

    ; ---- Pass 2: process rows from work array, store into output array.

    mov         rdi, r12                ; (JSAMPROW *)
    mov         eax, r13d

    ; -- Even part

    ; xmm6=col0, xmm5=col2, xmm1=col4, xmm3=col6

    movdqa      xmm2, xmm6
    movdqa      xmm0, xmm5
    psubw       xmm6, xmm1              ; xmm6=tmp11
    psubw       xmm5, xmm3
    paddw       xmm2, xmm1              ; xmm2=tmp10
    paddw       xmm0, xmm3              ; xmm0=tmp13

    psllw       xmm5, PRE_MULTIPLY_SCALE_BITS
    pmulhw      xmm5, [rel PW_F1414]
    psubw       xmm5, xmm0              ; xmm5=tmp12

    movdqa      xmm1, xmm2
    movdqa      xmm3, xmm6
    psubw       xmm2, xmm0              ; xmm2=tmp3
    psubw       xmm6, xmm5              ; xmm6=tmp2
    paddw       xmm1, xmm0              ; xmm1=tmp0
    paddw       xmm3, xmm5              ; xmm3=tmp1

    movdqa      xmm0, XMMWORD [wk(0)]   ; xmm0=col1
    movdqa      xmm5, XMMWORD [wk(1)]   ; xmm5=col3

    movdqa      XMMWORD [wk(0)], xmm2   ; wk(0)=tmp3
    movdqa      XMMWORD [wk(1)], xmm6   ; wk(1)=tmp2

    ; -- Odd part

    ; xmm0=col1, xmm5=col3, xmm4=col5, xmm7=col7

    movdqa      xmm2, xmm0
    movdqa      xmm6, xmm4
    psubw       xmm0, xmm7              ; xmm0=z12
    psubw       xmm4, xmm5              ; xmm4=z10
    paddw       xmm2, xmm7              ; xmm2=z11
    paddw       xmm6, xmm5              ; xmm6=z13

    movdqa      xmm7, xmm4              ; xmm7=z10(unscaled)
    psllw       xmm0, PRE_MULTIPLY_SCALE_BITS
    psllw       xmm4, PRE_MULTIPLY_SCALE_BITS

    movdqa      xmm5, xmm2
    psubw       xmm2, xmm6
    paddw       xmm5, xmm6              ; xmm5=tmp7

    psllw       xmm2, PRE_MULTIPLY_SCALE_BITS
    pmulhw      xmm2, [rel PW_F1414]    ; xmm2=tmp11

    ; To avoid overflow...
    ;
    ; (Original)
    ; tmp12 = -2.613125930 * z10 + z5;
    ;
    ; (This implementation)
    ; tmp12 = (-1.613125930 - 1) * z10 + z5;
    ;       = -1.613125930 * z10 - z10 + z5;
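    ;
    ; The split is needed because FIX(2.613125930) = 669 no longer fits in a
    ; signed 16-bit word once it is pre-shifted by CONST_SHIFT
    ; (669 << 6 = 42816 > 32767), whereas FIX(1.613125930) = 413 does
    ; (413 << 6 = 26432).  PW_MF1613 supplies the -1.613125930 * z10 term via
    ; pmulhw, and the extra -z10 is the psubw of the unscaled z10 below.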

    movdqa      xmm6, xmm4
    paddw       xmm4, xmm0
    pmulhw      xmm4, [rel PW_F1847]    ; xmm4=z5
    pmulhw      xmm6, [rel PW_MF1613]
    pmulhw      xmm0, [rel PW_F1082]
    psubw       xmm6, xmm7
    psubw       xmm0, xmm4              ; xmm0=tmp10
    paddw       xmm6, xmm4              ; xmm6=tmp12

    ; -- Final output stage

    psubw       xmm6, xmm5              ; xmm6=tmp6
    movdqa      xmm7, xmm1
    movdqa      xmm4, xmm3
    paddw       xmm1, xmm5              ; xmm1=data0=(00 10 20 30 40 50 60 70)
    paddw       xmm3, xmm6              ; xmm3=data1=(01 11 21 31 41 51 61 71)
    psraw       xmm1, (PASS1_BITS+3)    ; descale
    psraw       xmm3, (PASS1_BITS+3)    ; descale
    psubw       xmm7, xmm5              ; xmm7=data7=(07 17 27 37 47 57 67 77)
    psubw       xmm4, xmm6              ; xmm4=data6=(06 16 26 36 46 56 66 76)
    psraw       xmm7, (PASS1_BITS+3)    ; descale
    psraw       xmm4, (PASS1_BITS+3)    ; descale
    psubw       xmm2, xmm6              ; xmm2=tmp5

    packsswb    xmm1, xmm4              ; xmm1=(00 10 20 30 40 50 60 70 06 16 26 36 46 56 66 76)
    packsswb    xmm3, xmm7              ; xmm3=(01 11 21 31 41 51 61 71 07 17 27 37 47 57 67 77)

    movdqa      xmm5, XMMWORD [wk(1)]   ; xmm5=tmp2
    movdqa      xmm6, XMMWORD [wk(0)]   ; xmm6=tmp3

    paddw       xmm0, xmm2              ; xmm0=tmp4
    movdqa      xmm4, xmm5
    movdqa      xmm7, xmm6
    paddw       xmm5, xmm2              ; xmm5=data2=(02 12 22 32 42 52 62 72)
    paddw       xmm6, xmm0              ; xmm6=data4=(04 14 24 34 44 54 64 74)
    psraw       xmm5, (PASS1_BITS+3)    ; descale
    psraw       xmm6, (PASS1_BITS+3)    ; descale
    psubw       xmm4, xmm2              ; xmm4=data5=(05 15 25 35 45 55 65 75)
    psubw       xmm7, xmm0              ; xmm7=data3=(03 13 23 33 43 53 63 73)
    psraw       xmm4, (PASS1_BITS+3)    ; descale
    psraw       xmm7, (PASS1_BITS+3)    ; descale

    movdqa      xmm2, [rel PB_CENTERJSAMP]  ; xmm2=[rel PB_CENTERJSAMP]

    packsswb    xmm5, xmm6              ; xmm5=(02 12 22 32 42 52 62 72 04 14 24 34 44 54 64 74)
    packsswb    xmm7, xmm4              ; xmm7=(03 13 23 33 43 53 63 73 05 15 25 35 45 55 65 75)

    paddb       xmm1, xmm2
    paddb       xmm3, xmm2
    paddb       xmm5, xmm2
    paddb       xmm7, xmm2

    movdqa      xmm0, xmm1              ; transpose coefficients(phase 1)
    punpcklbw   xmm1, xmm3              ; xmm1=(00 01 10 11 20 21 30 31 40 41 50 51 60 61 70 71)
    punpckhbw   xmm0, xmm3              ; xmm0=(06 07 16 17 26 27 36 37 46 47 56 57 66 67 76 77)
    movdqa      xmm6, xmm5              ; transpose coefficients(phase 1)
    punpcklbw   xmm5, xmm7              ; xmm5=(02 03 12 13 22 23 32 33 42 43 52 53 62 63 72 73)
    punpckhbw   xmm6, xmm7              ; xmm6=(04 05 14 15 24 25 34 35 44 45 54 55 64 65 74 75)

    movdqa      xmm4, xmm1              ; transpose coefficients(phase 2)
    punpcklwd   xmm1, xmm5              ; xmm1=(00 01 02 03 10 11 12 13 20 21 22 23 30 31 32 33)
    punpckhwd   xmm4, xmm5              ; xmm4=(40 41 42 43 50 51 52 53 60 61 62 63 70 71 72 73)
    movdqa      xmm2, xmm6              ; transpose coefficients(phase 2)
    punpcklwd   xmm6, xmm0              ; xmm6=(04 05 06 07 14 15 16 17 24 25 26 27 34 35 36 37)
    punpckhwd   xmm2, xmm0              ; xmm2=(44 45 46 47 54 55 56 57 64 65 66 67 74 75 76 77)

    movdqa      xmm3, xmm1              ; transpose coefficients(phase 3)
    punpckldq   xmm1, xmm6              ; xmm1=(00 01 02 03 04 05 06 07 10 11 12 13 14 15 16 17)
    punpckhdq   xmm3, xmm6              ; xmm3=(20 21 22 23 24 25 26 27 30 31 32 33 34 35 36 37)
    movdqa      xmm7, xmm4              ; transpose coefficients(phase 3)
    punpckldq   xmm4, xmm2              ; xmm4=(40 41 42 43 44 45 46 47 50 51 52 53 54 55 56 57)
    punpckhdq   xmm7, xmm2              ; xmm7=(60 61 62 63 64 65 66 67 70 71 72 73 74 75 76 77)

    pshufd      xmm5, xmm1, 0x4E        ; xmm5=(10 11 12 13 14 15 16 17 00 01 02 03 04 05 06 07)
    pshufd      xmm0, xmm3, 0x4E        ; xmm0=(30 31 32 33 34 35 36 37 20 21 22 23 24 25 26 27)
    pshufd      xmm6, xmm4, 0x4E        ; xmm6=(50 51 52 53 54 55 56 57 40 41 42 43 44 45 46 47)
    pshufd      xmm2, xmm7, 0x4E        ; xmm2=(70 71 72 73 74 75 76 77 60 61 62 63 64 65 66 67)

    mov         rdxp, JSAMPROW [rdi+0*SIZEOF_JSAMPROW]
    mov         rsip, JSAMPROW [rdi+2*SIZEOF_JSAMPROW]
    movq        XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE], xmm1
    movq        XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE], xmm3
    mov         rdxp, JSAMPROW [rdi+4*SIZEOF_JSAMPROW]
    mov         rsip, JSAMPROW [rdi+6*SIZEOF_JSAMPROW]
    movq        XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE], xmm4
    movq        XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE], xmm7

    mov         rdxp, JSAMPROW [rdi+1*SIZEOF_JSAMPROW]
    mov         rsip, JSAMPROW [rdi+3*SIZEOF_JSAMPROW]
    movq        XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE], xmm5
    movq        XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE], xmm0
    mov         rdxp, JSAMPROW [rdi+5*SIZEOF_JSAMPROW]
    mov         rsip, JSAMPROW [rdi+7*SIZEOF_JSAMPROW]
    movq        XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE], xmm6
    movq        XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE], xmm2

    UNCOLLECT_ARGS 4
    lea         rsp, [rbp-8]
    pop         r15
    pop         rbp
    ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align       32