;
; jiss2fst-64.asm - fast integer IDCT (64-bit SSE2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright 2009 D. R. Commander
;
; Based on
; x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; This file contains a fast, not so accurate integer implementation of
; the inverse DCT (Discrete Cosine Transform). The following code is
; based directly on the IJG's original jidctfst.c; see jidctfst.c
; for more details.
;
; [TAB8]

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------

%define CONST_BITS      8       ; 14 is also OK.
%define PASS1_BITS      2

%if IFAST_SCALE_BITS != PASS1_BITS
%error "'IFAST_SCALE_BITS' must be equal to 'PASS1_BITS'."
%endif

%if CONST_BITS == 8
F_1_082 equ     277             ; FIX(1.082392200)
F_1_414 equ     362             ; FIX(1.414213562)
F_1_847 equ     473             ; FIX(1.847759065)
F_2_613 equ     669             ; FIX(2.613125930)
F_1_613 equ     (F_2_613 - 256) ; FIX(2.613125930) - FIX(1)
%else
; NASM cannot do compile-time arithmetic on floating-point constants.
%define DESCALE(x,n)  (((x)+(1<<((n)-1)))>>(n))
F_1_082 equ     DESCALE(1162209775,30-CONST_BITS)       ; FIX(1.082392200)
F_1_414 equ     DESCALE(1518500249,30-CONST_BITS)       ; FIX(1.414213562)
F_1_847 equ     DESCALE(1984016188,30-CONST_BITS)       ; FIX(1.847759065)
F_2_613 equ     DESCALE(2805822602,30-CONST_BITS)       ; FIX(2.613125930)
F_1_613 equ     (F_2_613 - (1 << CONST_BITS))   ; FIX(2.613125930) - FIX(1)
%endif
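
; In C terms, FIX(x) rounds x to a CONST_BITS-bit fixed-point integer and
; DESCALE() is a rounding right shift; a rough sketch (the exact macros
; live in jidctfst.c):
;
;   #define FIX(x)        ((int) ((x) * (1 << CONST_BITS) + 0.5))
;   #define DESCALE(x,n)  (((x) + (1 << ((n) - 1))) >> (n))
;
;   FIX(1.414213562) = (int) (1.414213562 * 256 + 0.5) = 362
;
; The %else branch derives the same values from 30-bit precomputed
; constants, e.g. DESCALE(1518500249, 30-8) = 362, because NASM cannot
; evaluate the floating-point form at assembly time.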

; --------------------------------------------------------------------------
        SECTION SEG_CONST

; PRE_MULTIPLY_SCALE_BITS <= 2 (to avoid overflow)
; CONST_BITS + CONST_SHIFT + PRE_MULTIPLY_SCALE_BITS == 16 (for pmulhw)

%define PRE_MULTIPLY_SCALE_BITS  2
%define CONST_SHIFT  (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS)
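
; pmulhw keeps only the high 16 bits of a 16x16-bit signed multiply, i.e.
; (a * b) >> 16 per word lane.  With the data pre-shifted left by
; PRE_MULTIPLY_SCALE_BITS and the constants pre-shifted left by
; CONST_SHIFT, the three shifts sum to 16 and cancel.  A rough C sketch of
; one lane, assuming the pre-shifted value still fits in 16 bits:
;
;   int16_t mul_fix(int16_t a, int16_t f)    /* f = FIX(x), CONST_BITS=8 */
;   {
;     int16_t a2 = (int16_t) (a << PRE_MULTIPLY_SCALE_BITS);  /* psllw  */
;     int16_t fc = (int16_t) (f << CONST_SHIFT);              /* table  */
;     return (int16_t) (((int32_t) a2 * fc) >> 16);           /* pmulhw */
;   }                                        /* == (a * f) >> CONST_BITS */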

        alignz  16
        global  EXTN(jconst_idct_ifast_sse2)

EXTN(jconst_idct_ifast_sse2):

PW_F1414        times 8  dw  F_1_414 << CONST_SHIFT
PW_F1847        times 8  dw  F_1_847 << CONST_SHIFT
PW_MF1613       times 8  dw -F_1_613 << CONST_SHIFT
PW_F1082        times 8  dw  F_1_082 << CONST_SHIFT
PB_CENTERJSAMP  times 16 db  CENTERJSAMPLE

        alignz  16

; --------------------------------------------------------------------------
        SECTION SEG_TEXT
        BITS    64
;
; Perform dequantization and inverse DCT on one block of coefficients.
;
; GLOBAL(void)
; jsimd_idct_ifast_sse2 (void * dct_table, JCOEFPTR coef_block,
;                        JSAMPARRAY output_buf, JDIMENSION output_col)
;

; r10 = jpeg_component_info * compptr
; r11 = JCOEFPTR coef_block
; r12 = JSAMPARRAY output_buf
; r13 = JDIMENSION output_col

%define original_rbp    rbp+0
%define wk(i)           rbp-(WK_NUM-(i))*SIZEOF_XMMWORD ; xmmword wk[WK_NUM]
%define WK_NUM          2

        align   16
        global  EXTN(jsimd_idct_ifast_sse2)

EXTN(jsimd_idct_ifast_sse2):
        push    rbp
        mov     rax,rsp                         ; rax = original rbp
        sub     rsp, byte 4
        and     rsp, byte (-SIZEOF_XMMWORD)     ; align to 128 bits
        mov     [rsp],rax
        mov     rbp,rsp                         ; rbp = aligned rbp
        lea     rsp, [wk(0)]
        collect_args
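
        ; The prologue builds a 16-byte-aligned frame for the xmmword work
        ; array and saves the original stack pointer at the alignment
        ; boundary so the epilogue can restore it.  Roughly, in C terms
        ; (a sketch only; 'rsp' stands for the stack pointer):
        ;
        ;   rsp = (rsp - 4) & ~(SIZEOF_XMMWORD - 1); /* align down to 16 */
        ;   *(void **) rsp = original_rbp;           /* for the epilogue */
        ;   rbp = rsp;                               /* aligned frame ptr */
        ;   rsp -= WK_NUM * SIZEOF_XMMWORD;          /* reserve wk[0..1] */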

        ; ---- Pass 1: process columns from input.

        mov     rdx, r10                ; quantptr
        mov     rsi, r11                ; inptr

%ifndef NO_ZERO_COLUMN_TEST_IFAST_SSE2
        mov     eax, DWORD [DWBLOCK(1,0,rsi,SIZEOF_JCOEF)]
        or      eax, DWORD [DWBLOCK(2,0,rsi,SIZEOF_JCOEF)]
        jnz     near .columnDCT

        movdqa  xmm0, XMMWORD [XMMBLOCK(1,0,rsi,SIZEOF_JCOEF)]
        movdqa  xmm1, XMMWORD [XMMBLOCK(2,0,rsi,SIZEOF_JCOEF)]
        por     xmm0, XMMWORD [XMMBLOCK(3,0,rsi,SIZEOF_JCOEF)]
        por     xmm1, XMMWORD [XMMBLOCK(4,0,rsi,SIZEOF_JCOEF)]
        por     xmm0, XMMWORD [XMMBLOCK(5,0,rsi,SIZEOF_JCOEF)]
        por     xmm1, XMMWORD [XMMBLOCK(6,0,rsi,SIZEOF_JCOEF)]
        por     xmm0, XMMWORD [XMMBLOCK(7,0,rsi,SIZEOF_JCOEF)]
        por     xmm1,xmm0
        packsswb xmm1,xmm1
        packsswb xmm1,xmm1
        movd    eax,xmm1
        test    rax,rax
        jnz     short .columnDCT

        ; -- AC terms all zero

        movdqa  xmm0, XMMWORD [XMMBLOCK(0,0,rsi,SIZEOF_JCOEF)]
        pmullw  xmm0, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
        movdqa  xmm7,xmm0               ; xmm0=in0=(00 01 02 03 04 05 06 07)
        punpcklwd xmm0,xmm0             ; xmm0=(00 00 01 01 02 02 03 03)
        punpckhwd xmm7,xmm7             ; xmm7=(04 04 05 05 06 06 07 07)

        pshufd  xmm6,xmm0,0x00          ; xmm6=col0=(00 00 00 00 00 00 00 00)
        pshufd  xmm2,xmm0,0x55          ; xmm2=col1=(01 01 01 01 01 01 01 01)
        pshufd  xmm5,xmm0,0xAA          ; xmm5=col2=(02 02 02 02 02 02 02 02)
        pshufd  xmm0,xmm0,0xFF          ; xmm0=col3=(03 03 03 03 03 03 03 03)
        pshufd  xmm1,xmm7,0x00          ; xmm1=col4=(04 04 04 04 04 04 04 04)
        pshufd  xmm4,xmm7,0x55          ; xmm4=col5=(05 05 05 05 05 05 05 05)
        pshufd  xmm3,xmm7,0xAA          ; xmm3=col6=(06 06 06 06 06 06 06 06)
        pshufd  xmm7,xmm7,0xFF          ; xmm7=col7=(07 07 07 07 07 07 07 07)

        movdqa  XMMWORD [wk(0)], xmm2   ; wk(0)=col1
        movdqa  XMMWORD [wk(1)], xmm0   ; wk(1)=col3
        jmp     near .column_end
%endif
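
; The %ifndef block above is the all-AC-zero shortcut: when every AC
; coefficient in the block is zero, the 1-D IDCT of a column is just its
; dequantized DC value, so pass 1 reduces to a broadcast.  A rough C sketch
; ('workspace' stands for the col0..col7 registers above):
;
;   for (col = 0; col < 8; col++) {
;     int dc = coef_block[col] * quantptr[col];   /* row 0 holds the DCs */
;     for (row = 0; row < 8; row++)
;       workspace[row][col] = dc;
;   }
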
.columnDCT:

        ; -- Even part

        movdqa  xmm0, XMMWORD [XMMBLOCK(0,0,rsi,SIZEOF_JCOEF)]
        movdqa  xmm1, XMMWORD [XMMBLOCK(2,0,rsi,SIZEOF_JCOEF)]
        pmullw  xmm0, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
        pmullw  xmm1, XMMWORD [XMMBLOCK(2,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
        movdqa  xmm2, XMMWORD [XMMBLOCK(4,0,rsi,SIZEOF_JCOEF)]
        movdqa  xmm3, XMMWORD [XMMBLOCK(6,0,rsi,SIZEOF_JCOEF)]
        pmullw  xmm2, XMMWORD [XMMBLOCK(4,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
        pmullw  xmm3, XMMWORD [XMMBLOCK(6,0,rdx,SIZEOF_IFAST_MULT_TYPE)]

        movdqa  xmm4,xmm0
        movdqa  xmm5,xmm1
        psubw   xmm0,xmm2               ; xmm0=tmp11
        psubw   xmm1,xmm3
        paddw   xmm4,xmm2               ; xmm4=tmp10
        paddw   xmm5,xmm3               ; xmm5=tmp13

        psllw   xmm1,PRE_MULTIPLY_SCALE_BITS
        pmulhw  xmm1,[rel PW_F1414]
        psubw   xmm1,xmm5               ; xmm1=tmp12

        movdqa  xmm6,xmm4
        movdqa  xmm7,xmm0
        psubw   xmm4,xmm5               ; xmm4=tmp3
        psubw   xmm0,xmm1               ; xmm0=tmp2
        paddw   xmm6,xmm5               ; xmm6=tmp0
        paddw   xmm7,xmm1               ; xmm7=tmp1

        movdqa  XMMWORD [wk(1)], xmm4   ; wk(1)=tmp3
        movdqa  XMMWORD [wk(0)], xmm0   ; wk(0)=tmp2
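
        ; This is the AA&N even-part butterfly from jidctfst.c, computed on
        ; all eight columns at once; roughly, per column (inN is the row-N
        ; coefficient already multiplied by its quantizer):
        ;
        ;   tmp10 = in0 + in4;                  tmp11 = in0 - in4;
        ;   tmp13 = in2 + in6;
        ;   tmp12 = (in2 - in6) * 1.414213562 - tmp13;    /* 2*c4 */
        ;   tmp0 = tmp10 + tmp13;   tmp3 = tmp10 - tmp13;
        ;   tmp1 = tmp11 + tmp12;   tmp2 = tmp11 - tmp12;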

        ; -- Odd part

        movdqa  xmm2, XMMWORD [XMMBLOCK(1,0,rsi,SIZEOF_JCOEF)]
        movdqa  xmm3, XMMWORD [XMMBLOCK(3,0,rsi,SIZEOF_JCOEF)]
        pmullw  xmm2, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
        pmullw  xmm3, XMMWORD [XMMBLOCK(3,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
        movdqa  xmm5, XMMWORD [XMMBLOCK(5,0,rsi,SIZEOF_JCOEF)]
        movdqa  xmm1, XMMWORD [XMMBLOCK(7,0,rsi,SIZEOF_JCOEF)]
        pmullw  xmm5, XMMWORD [XMMBLOCK(5,0,rdx,SIZEOF_IFAST_MULT_TYPE)]
        pmullw  xmm1, XMMWORD [XMMBLOCK(7,0,rdx,SIZEOF_IFAST_MULT_TYPE)]

        movdqa  xmm4,xmm2
        movdqa  xmm0,xmm5
        psubw   xmm2,xmm1               ; xmm2=z12
        psubw   xmm5,xmm3               ; xmm5=z10
        paddw   xmm4,xmm1               ; xmm4=z11
        paddw   xmm0,xmm3               ; xmm0=z13

        movdqa  xmm1,xmm5               ; xmm1=z10(unscaled)
        psllw   xmm2,PRE_MULTIPLY_SCALE_BITS
        psllw   xmm5,PRE_MULTIPLY_SCALE_BITS

        movdqa  xmm3,xmm4
        psubw   xmm4,xmm0
        paddw   xmm3,xmm0               ; xmm3=tmp7

        psllw   xmm4,PRE_MULTIPLY_SCALE_BITS
        pmulhw  xmm4,[rel PW_F1414]     ; xmm4=tmp11

        ; To avoid overflow...
        ;
        ; (Original)
        ; tmp12 = -2.613125930 * z10 + z5;
        ;
        ; (This implementation)
        ; tmp12 = (-1.613125930 - 1) * z10 + z5;
        ;       = -1.613125930 * z10 - z10 + z5;
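        ;
        ; The rewrite matters because the pmulhw constants are signed
        ; 16-bit words: FIX(2.613125930) << CONST_SHIFT = 669 << 6 = 42816
        ; does not fit in int16_t, while FIX(1.613125930) << CONST_SHIFT
        ; = 413 << 6 = 26432 does.  The extra "- z10" term is applied with
        ; the unscaled copy saved above (here xmm1).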

        movdqa  xmm0,xmm5
        paddw   xmm5,xmm2
        pmulhw  xmm5,[rel PW_F1847]     ; xmm5=z5
        pmulhw  xmm0,[rel PW_MF1613]
        pmulhw  xmm2,[rel PW_F1082]
        psubw   xmm0,xmm1
        psubw   xmm2,xmm5               ; xmm2=tmp10
        paddw   xmm0,xmm5               ; xmm0=tmp12

        ; -- Final output stage

        psubw   xmm0,xmm3               ; xmm0=tmp6
        movdqa  xmm1,xmm6
        movdqa  xmm5,xmm7
        paddw   xmm6,xmm3               ; xmm6=data0=(00 01 02 03 04 05 06 07)
        paddw   xmm7,xmm0               ; xmm7=data1=(10 11 12 13 14 15 16 17)
        psubw   xmm1,xmm3               ; xmm1=data7=(70 71 72 73 74 75 76 77)
        psubw   xmm5,xmm0               ; xmm5=data6=(60 61 62 63 64 65 66 67)
        psubw   xmm4,xmm0               ; xmm4=tmp5

        movdqa  xmm3,xmm6               ; transpose coefficients(phase 1)
        punpcklwd xmm6,xmm7             ; xmm6=(00 10 01 11 02 12 03 13)
        punpckhwd xmm3,xmm7             ; xmm3=(04 14 05 15 06 16 07 17)
        movdqa  xmm0,xmm5               ; transpose coefficients(phase 1)
        punpcklwd xmm5,xmm1             ; xmm5=(60 70 61 71 62 72 63 73)
        punpckhwd xmm0,xmm1             ; xmm0=(64 74 65 75 66 76 67 77)

        movdqa  xmm7, XMMWORD [wk(0)]   ; xmm7=tmp2
        movdqa  xmm1, XMMWORD [wk(1)]   ; xmm1=tmp3

        movdqa  XMMWORD [wk(0)], xmm5   ; wk(0)=(60 70 61 71 62 72 63 73)
        movdqa  XMMWORD [wk(1)], xmm0   ; wk(1)=(64 74 65 75 66 76 67 77)

        paddw   xmm2,xmm4               ; xmm2=tmp4
        movdqa  xmm5,xmm7
        movdqa  xmm0,xmm1
        paddw   xmm7,xmm4               ; xmm7=data2=(20 21 22 23 24 25 26 27)
        paddw   xmm1,xmm2               ; xmm1=data4=(40 41 42 43 44 45 46 47)
        psubw   xmm5,xmm4               ; xmm5=data5=(50 51 52 53 54 55 56 57)
        psubw   xmm0,xmm2               ; xmm0=data3=(30 31 32 33 34 35 36 37)

        movdqa  xmm4,xmm7               ; transpose coefficients(phase 1)
        punpcklwd xmm7,xmm0             ; xmm7=(20 30 21 31 22 32 23 33)
        punpckhwd xmm4,xmm0             ; xmm4=(24 34 25 35 26 36 27 37)
        movdqa  xmm2,xmm1               ; transpose coefficients(phase 1)
        punpcklwd xmm1,xmm5             ; xmm1=(40 50 41 51 42 52 43 53)
        punpckhwd xmm2,xmm5             ; xmm2=(44 54 45 55 46 56 47 57)

        movdqa  xmm0,xmm3               ; transpose coefficients(phase 2)
        punpckldq xmm3,xmm4             ; xmm3=(04 14 24 34 05 15 25 35)
        punpckhdq xmm0,xmm4             ; xmm0=(06 16 26 36 07 17 27 37)
        movdqa  xmm5,xmm6               ; transpose coefficients(phase 2)
        punpckldq xmm6,xmm7             ; xmm6=(00 10 20 30 01 11 21 31)
        punpckhdq xmm5,xmm7             ; xmm5=(02 12 22 32 03 13 23 33)

        movdqa  xmm4, XMMWORD [wk(0)]   ; xmm4=(60 70 61 71 62 72 63 73)
        movdqa  xmm7, XMMWORD [wk(1)]   ; xmm7=(64 74 65 75 66 76 67 77)

        movdqa  XMMWORD [wk(0)], xmm3   ; wk(0)=(04 14 24 34 05 15 25 35)
        movdqa  XMMWORD [wk(1)], xmm0   ; wk(1)=(06 16 26 36 07 17 27 37)

        movdqa  xmm3,xmm1               ; transpose coefficients(phase 2)
        punpckldq xmm1,xmm4             ; xmm1=(40 50 60 70 41 51 61 71)
        punpckhdq xmm3,xmm4             ; xmm3=(42 52 62 72 43 53 63 73)
        movdqa  xmm0,xmm2               ; transpose coefficients(phase 2)
        punpckldq xmm2,xmm7             ; xmm2=(44 54 64 74 45 55 65 75)
        punpckhdq xmm0,xmm7             ; xmm0=(46 56 66 76 47 57 67 77)

        movdqa  xmm4,xmm6               ; transpose coefficients(phase 3)
        punpcklqdq xmm6,xmm1            ; xmm6=col0=(00 10 20 30 40 50 60 70)
        punpckhqdq xmm4,xmm1            ; xmm4=col1=(01 11 21 31 41 51 61 71)
        movdqa  xmm7,xmm5               ; transpose coefficients(phase 3)
        punpcklqdq xmm5,xmm3            ; xmm5=col2=(02 12 22 32 42 52 62 72)
        punpckhqdq xmm7,xmm3            ; xmm7=col3=(03 13 23 33 43 53 63 73)

        movdqa  xmm1, XMMWORD [wk(0)]   ; xmm1=(04 14 24 34 05 15 25 35)
        movdqa  xmm3, XMMWORD [wk(1)]   ; xmm3=(06 16 26 36 07 17 27 37)

        movdqa  XMMWORD [wk(0)], xmm4   ; wk(0)=col1
        movdqa  XMMWORD [wk(1)], xmm7   ; wk(1)=col3

        movdqa  xmm4,xmm1               ; transpose coefficients(phase 3)
        punpcklqdq xmm1,xmm2            ; xmm1=col4=(04 14 24 34 44 54 64 74)
        punpckhqdq xmm4,xmm2            ; xmm4=col5=(05 15 25 35 45 55 65 75)
        movdqa  xmm7,xmm3               ; transpose coefficients(phase 3)
        punpcklqdq xmm3,xmm0            ; xmm3=col6=(06 16 26 36 46 56 66 76)
        punpckhqdq xmm7,xmm0            ; xmm7=col7=(07 17 27 37 47 57 67 77)
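
        ; The three punpck phases above interleave progressively wider
        ; elements (words, then dwords, then qwords), which together
        ; transpose the 8x8 matrix of 16-bit results; in C terms, roughly:
        ;
        ;   /* sketch: out = transpose(in) for an 8x8 array of int16_t */
        ;   for (i = 0; i < 8; i++)
        ;     for (j = 0; j < 8; j++)
        ;       out[j][i] = in[i][j];
        ;
        ; so that pass 2 can run the same 1-D IDCT on what were columns.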
.column_end:

        ; -- Prefetch the next coefficient block

        prefetchnta [rsi + DCTSIZE2*SIZEOF_JCOEF + 0*32]
        prefetchnta [rsi + DCTSIZE2*SIZEOF_JCOEF + 1*32]
        prefetchnta [rsi + DCTSIZE2*SIZEOF_JCOEF + 2*32]
        prefetchnta [rsi + DCTSIZE2*SIZEOF_JCOEF + 3*32]
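        ; (The next block starts DCTSIZE2 * SIZEOF_JCOEF = 64 * 2 = 128
        ; bytes past the current one; four non-temporal prefetches at
        ; 32-byte strides cover all of it with minimal cache pollution.)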

        ; ---- Pass 2: process rows from work array, store into output array.

        mov     rax, [original_rbp]
        mov     rdi, r12                ; (JSAMPROW *)
        mov     eax, r13d               ; (JDIMENSION)

        ; -- Even part

        ; xmm6=col0, xmm5=col2, xmm1=col4, xmm3=col6

        movdqa  xmm2,xmm6
        movdqa  xmm0,xmm5
        psubw   xmm6,xmm1               ; xmm6=tmp11
        psubw   xmm5,xmm3
        paddw   xmm2,xmm1               ; xmm2=tmp10
        paddw   xmm0,xmm3               ; xmm0=tmp13

        psllw   xmm5,PRE_MULTIPLY_SCALE_BITS
        pmulhw  xmm5,[rel PW_F1414]
        psubw   xmm5,xmm0               ; xmm5=tmp12

        movdqa  xmm1,xmm2
        movdqa  xmm3,xmm6
        psubw   xmm2,xmm0               ; xmm2=tmp3
        psubw   xmm6,xmm5               ; xmm6=tmp2
        paddw   xmm1,xmm0               ; xmm1=tmp0
        paddw   xmm3,xmm5               ; xmm3=tmp1

        movdqa  xmm0, XMMWORD [wk(0)]   ; xmm0=col1
        movdqa  xmm5, XMMWORD [wk(1)]   ; xmm5=col3

        movdqa  XMMWORD [wk(0)], xmm2   ; wk(0)=tmp3
        movdqa  XMMWORD [wk(1)], xmm6   ; wk(1)=tmp2

        ; -- Odd part

        ; xmm0=col1, xmm5=col3, xmm4=col5, xmm7=col7

        movdqa  xmm2,xmm0
        movdqa  xmm6,xmm4
        psubw   xmm0,xmm7               ; xmm0=z12
        psubw   xmm4,xmm5               ; xmm4=z10
        paddw   xmm2,xmm7               ; xmm2=z11
        paddw   xmm6,xmm5               ; xmm6=z13

        movdqa  xmm7,xmm4               ; xmm7=z10(unscaled)
        psllw   xmm0,PRE_MULTIPLY_SCALE_BITS
        psllw   xmm4,PRE_MULTIPLY_SCALE_BITS

        movdqa  xmm5,xmm2
        psubw   xmm2,xmm6
        paddw   xmm5,xmm6               ; xmm5=tmp7

        psllw   xmm2,PRE_MULTIPLY_SCALE_BITS
        pmulhw  xmm2,[rel PW_F1414]     ; xmm2=tmp11

        ; To avoid overflow...
        ;
        ; (Original)
        ; tmp12 = -2.613125930 * z10 + z5;
        ;
        ; (This implementation)
        ; tmp12 = (-1.613125930 - 1) * z10 + z5;
        ;       = -1.613125930 * z10 - z10 + z5;

        movdqa  xmm6,xmm4
        paddw   xmm4,xmm0
        pmulhw  xmm4,[rel PW_F1847]     ; xmm4=z5
        pmulhw  xmm6,[rel PW_MF1613]
        pmulhw  xmm0,[rel PW_F1082]
        psubw   xmm6,xmm7
        psubw   xmm0,xmm4               ; xmm0=tmp10
        paddw   xmm6,xmm4               ; xmm6=tmp12

        ; -- Final output stage

        psubw   xmm6,xmm5               ; xmm6=tmp6
        movdqa  xmm7,xmm1
        movdqa  xmm4,xmm3
        paddw   xmm1,xmm5               ; xmm1=data0=(00 10 20 30 40 50 60 70)
        paddw   xmm3,xmm6               ; xmm3=data1=(01 11 21 31 41 51 61 71)
        psraw   xmm1,(PASS1_BITS+3)     ; descale
        psraw   xmm3,(PASS1_BITS+3)     ; descale
        psubw   xmm7,xmm5               ; xmm7=data7=(07 17 27 37 47 57 67 77)
        psubw   xmm4,xmm6               ; xmm4=data6=(06 16 26 36 46 56 66 76)
        psraw   xmm7,(PASS1_BITS+3)     ; descale
        psraw   xmm4,(PASS1_BITS+3)     ; descale
        psubw   xmm2,xmm6               ; xmm2=tmp5

        packsswb xmm1,xmm4              ; xmm1=(00 10 20 30 40 50 60 70 06 16 26 36 46 56 66 76)
        packsswb xmm3,xmm7              ; xmm3=(01 11 21 31 41 51 61 71 07 17 27 37 47 57 67 77)
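
        ; Descaling by PASS1_BITS+3 = 5 removes the 2^PASS1_BITS scaling
        ; carried over from pass 1 plus the 2^3 gain inherent in the
        ; 8-point AA&N IDCT; in C terms, each sample is roughly
        ;
        ;   sample = range_limit(DESCALE-like shift by PASS1_BITS + 3
        ;                        + CENTERJSAMPLE);
        ;
        ; (cf. jidctfst.c; here the +CENTERJSAMPLE and clamping happen
        ; below via packsswb saturation and paddb.)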

        movdqa  xmm5, XMMWORD [wk(1)]   ; xmm5=tmp2
        movdqa  xmm6, XMMWORD [wk(0)]   ; xmm6=tmp3

        paddw   xmm0,xmm2               ; xmm0=tmp4
        movdqa  xmm4,xmm5
        movdqa  xmm7,xmm6
        paddw   xmm5,xmm2               ; xmm5=data2=(02 12 22 32 42 52 62 72)
        paddw   xmm6,xmm0               ; xmm6=data4=(04 14 24 34 44 54 64 74)
        psraw   xmm5,(PASS1_BITS+3)     ; descale
        psraw   xmm6,(PASS1_BITS+3)     ; descale
        psubw   xmm4,xmm2               ; xmm4=data5=(05 15 25 35 45 55 65 75)
        psubw   xmm7,xmm0               ; xmm7=data3=(03 13 23 33 43 53 63 73)
        psraw   xmm4,(PASS1_BITS+3)     ; descale
        psraw   xmm7,(PASS1_BITS+3)     ; descale

        movdqa  xmm2,[rel PB_CENTERJSAMP]       ; xmm2=[PB_CENTERJSAMP]

        packsswb xmm5,xmm6              ; xmm5=(02 12 22 32 42 52 62 72 04 14 24 34 44 54 64 74)
        packsswb xmm7,xmm4              ; xmm7=(03 13 23 33 43 53 63 73 05 15 25 35 45 55 65 75)

        paddb   xmm1,xmm2
        paddb   xmm3,xmm2
        paddb   xmm5,xmm2
        paddb   xmm7,xmm2
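
        ; packsswb saturates each word to [-128, 127] and paddb then adds
        ; CENTERJSAMPLE (128), wrapping into [0, 255]; together they act
        ; as the range limiter.  A rough C sketch of one sample:
        ;
        ;   int16_t clamped = x < -128 ? -128 : x > 127 ? 127 : x;
        ;   uint8_t sample  = (uint8_t) (clamped + CENTERJSAMPLE);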

        movdqa  xmm0,xmm1               ; transpose coefficients(phase 1)
        punpcklbw xmm1,xmm3             ; xmm1=(00 01 10 11 20 21 30 31 40 41 50 51 60 61 70 71)
        punpckhbw xmm0,xmm3             ; xmm0=(06 07 16 17 26 27 36 37 46 47 56 57 66 67 76 77)
        movdqa  xmm6,xmm5               ; transpose coefficients(phase 1)
        punpcklbw xmm5,xmm7             ; xmm5=(02 03 12 13 22 23 32 33 42 43 52 53 62 63 72 73)
        punpckhbw xmm6,xmm7             ; xmm6=(04 05 14 15 24 25 34 35 44 45 54 55 64 65 74 75)

        movdqa  xmm4,xmm1               ; transpose coefficients(phase 2)
        punpcklwd xmm1,xmm5             ; xmm1=(00 01 02 03 10 11 12 13 20 21 22 23 30 31 32 33)
        punpckhwd xmm4,xmm5             ; xmm4=(40 41 42 43 50 51 52 53 60 61 62 63 70 71 72 73)
        movdqa  xmm2,xmm6               ; transpose coefficients(phase 2)
        punpcklwd xmm6,xmm0             ; xmm6=(04 05 06 07 14 15 16 17 24 25 26 27 34 35 36 37)
        punpckhwd xmm2,xmm0             ; xmm2=(44 45 46 47 54 55 56 57 64 65 66 67 74 75 76 77)

        movdqa  xmm3,xmm1               ; transpose coefficients(phase 3)
        punpckldq xmm1,xmm6             ; xmm1=(00 01 02 03 04 05 06 07 10 11 12 13 14 15 16 17)
        punpckhdq xmm3,xmm6             ; xmm3=(20 21 22 23 24 25 26 27 30 31 32 33 34 35 36 37)
        movdqa  xmm7,xmm4               ; transpose coefficients(phase 3)
        punpckldq xmm4,xmm2             ; xmm4=(40 41 42 43 44 45 46 47 50 51 52 53 54 55 56 57)
        punpckhdq xmm7,xmm2             ; xmm7=(60 61 62 63 64 65 66 67 70 71 72 73 74 75 76 77)

        pshufd  xmm5,xmm1,0x4E  ; xmm5=(10 11 12 13 14 15 16 17 00 01 02 03 04 05 06 07)
        pshufd  xmm0,xmm3,0x4E  ; xmm0=(30 31 32 33 34 35 36 37 20 21 22 23 24 25 26 27)
        pshufd  xmm6,xmm4,0x4E  ; xmm6=(50 51 52 53 54 55 56 57 40 41 42 43 44 45 46 47)
        pshufd  xmm2,xmm7,0x4E  ; xmm2=(70 71 72 73 74 75 76 77 60 61 62 63 64 65 66 67)

        mov     rdx, JSAMPROW [rdi+0*SIZEOF_JSAMPROW]
        mov     rsi, JSAMPROW [rdi+2*SIZEOF_JSAMPROW]
        movq    XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE], xmm1
        movq    XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE], xmm3
        mov     rdx, JSAMPROW [rdi+4*SIZEOF_JSAMPROW]
        mov     rsi, JSAMPROW [rdi+6*SIZEOF_JSAMPROW]
        movq    XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE], xmm4
        movq    XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE], xmm7

        mov     rdx, JSAMPROW [rdi+1*SIZEOF_JSAMPROW]
        mov     rsi, JSAMPROW [rdi+3*SIZEOF_JSAMPROW]
        movq    XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE], xmm5
        movq    XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE], xmm0
        mov     rdx, JSAMPROW [rdi+5*SIZEOF_JSAMPROW]
        mov     rsi, JSAMPROW [rdi+7*SIZEOF_JSAMPROW]
        movq    XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE], xmm6
        movq    XMM_MMWORD [rsi+rax*SIZEOF_JSAMPLE], xmm2
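
        ; Each movq writes one 8-byte output row at output_buf[row] +
        ; output_col (rax); the pshufd copies above supply rows 1, 3, 5
        ; and 7 from the high halves of the packed registers.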

        uncollect_args
        mov     rsp,rbp                 ; rsp <- aligned rbp
        pop     rsp                     ; rsp <- original rbp
        pop     rbp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align   16