Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
;
; jdmrgss2-64.asm - merged upsampling/color conversion (64-bit SSE2)
;
; Copyright 2009, 2012 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright 2009, 2012 D. R. Commander
;
; Based on
; x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; [TAB8]

%include "jcolsamp.inc"

; --------------------------------------------------------------------------
;
; Upsample and color convert for the case of 2:1 horizontal and 1:1 vertical.
;
; GLOBAL(void)
; jsimd_h2v1_merged_upsample_sse2 (JDIMENSION output_width,
;                                  JSAMPIMAGE input_buf,
;                                  JDIMENSION in_row_group_ctr,
;                                  JSAMPARRAY output_buf);
;

; r10 = JDIMENSION output_width
; r11 = JSAMPIMAGE input_buf
; r12 = JDIMENSION in_row_group_ctr
; r13 = JSAMPARRAY output_buf

%define wk(i)   rbp-(WK_NUM-(i))*SIZEOF_XMMWORD ; xmmword wk[WK_NUM]
%define WK_NUM  3

        align   16
        global  EXTN(jsimd_h2v1_merged_upsample_sse2)

EXTN(jsimd_h2v1_merged_upsample_sse2):
        push    rbp
        mov     rax,rsp                 ; rax = original rbp
        sub     rsp, byte 4
        and     rsp, byte (-SIZEOF_XMMWORD) ; align to 128 bits
        mov     [rsp],rax
        mov     rbp,rsp                 ; rbp = aligned rbp
        lea     rsp, [wk(0)]
        collect_args
        push    rbx
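; The prologue above keeps the caller's stack reachable while building a
; 16-byte-aligned workspace for the xmmword array wk(0)..wk(WK_NUM-1).
; A rough C-like sketch of the same idea (names ours, illustration only):
;
;   char *saved = (char *) rsp;                  /* kept in rax         */
;   rsp = (char *) (((size_t) rsp - 4) & ~(size_t) (SIZEOF_XMMWORD-1));
;   *(char **) rsp = saved;                      /* spilled for .return */
;   rbp = rsp;                                   /* aligned frame base  */
;   rsp = rbp - WK_NUM * SIZEOF_XMMWORD;         /* rsp = wk(0)         */
;
; The movdqa accesses to wk(i) below depend on this 16-byte alignment.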

        mov     rcx, r10        ; col
        test    rcx,rcx
        jz      near .return

        push    rcx

        mov     rdi, r11
        mov     rcx, r12
        mov     rsi, JSAMPARRAY [rdi+0*SIZEOF_JSAMPARRAY]
        mov     rbx, JSAMPARRAY [rdi+1*SIZEOF_JSAMPARRAY]
        mov     rdx, JSAMPARRAY [rdi+2*SIZEOF_JSAMPARRAY]
        mov     rdi, r13
        mov     rsi, JSAMPROW [rsi+rcx*SIZEOF_JSAMPROW] ; inptr0
        mov     rbx, JSAMPROW [rbx+rcx*SIZEOF_JSAMPROW] ; inptr1
        mov     rdx, JSAMPROW [rdx+rcx*SIZEOF_JSAMPROW] ; inptr2
        mov     rdi, JSAMPROW [rdi]                     ; outptr

        pop     rcx             ; col

.columnloop:

        movdqa  xmm6, XMMWORD [rbx]     ; xmm6=Cb(0123456789ABCDEF)
        movdqa  xmm7, XMMWORD [rdx]     ; xmm7=Cr(0123456789ABCDEF)

        pxor    xmm1,xmm1               ; xmm1=(all 0's)
        pcmpeqw xmm3,xmm3
        psllw   xmm3,7                  ; xmm3={0xFF80 0xFF80 0xFF80 0xFF80 ..}

        movdqa    xmm4,xmm6
        punpckhbw xmm6,xmm1             ; xmm6=Cb(89ABCDEF)=CbH
        punpcklbw xmm4,xmm1             ; xmm4=Cb(01234567)=CbL
        movdqa    xmm0,xmm7
        punpckhbw xmm7,xmm1             ; xmm7=Cr(89ABCDEF)=CrH
        punpcklbw xmm0,xmm1             ; xmm0=Cr(01234567)=CrL

        paddw   xmm6,xmm3
        paddw   xmm4,xmm3
        paddw   xmm7,xmm3
        paddw   xmm0,xmm3
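; Cb and Cr arrive as unsigned bytes (0..255).  After the zero-extending
; unpack, adding 0xFF80 (-128 as a signed word) recenters them around
; zero, which is what the conversion equations below expect; per sample,
; roughly:
;
;   int cb = (unsigned char) *inptr1 - 128;      /* now -128..127 */
;   int cr = (unsigned char) *inptr2 - 128;
;
; pcmpeqw/psllw merely materialize the -128 constant without a memory
; load.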

        ; (Original)
        ; R = Y                + 1.40200 * Cr
        ; G = Y - 0.34414 * Cb - 0.71414 * Cr
        ; B = Y + 1.77200 * Cb
        ;
        ; (This implementation)
        ; R = Y                + 0.40200 * Cr + Cr
        ; G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
        ; B = Y - 0.22800 * Cb + Cb + Cb
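;
; The rewrite holds because 1.40200 = 0.40200 + 1, -0.71414 = 0.28586 - 1,
; and 1.77200 = -0.22800 + 2.  Folding the integer parts into plain adds
; of Cb/Cr leaves every remaining coefficient below 0.5 in magnitude, so
; each scaled constant fits in a signed 16-bit word at SCALEBITS=16
; fixed-point precision, as the pmulhw multiplies below require.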

        movdqa  xmm5,xmm6               ; xmm5=CbH
        movdqa  xmm2,xmm4               ; xmm2=CbL
        paddw   xmm6,xmm6               ; xmm6=2*CbH
        paddw   xmm4,xmm4               ; xmm4=2*CbL
        movdqa  xmm1,xmm7               ; xmm1=CrH
        movdqa  xmm3,xmm0               ; xmm3=CrL
        paddw   xmm7,xmm7               ; xmm7=2*CrH
        paddw   xmm0,xmm0               ; xmm0=2*CrL

        pmulhw  xmm6,[rel PW_MF0228]    ; xmm6=(2*CbH * -FIX(0.22800))
        pmulhw  xmm4,[rel PW_MF0228]    ; xmm4=(2*CbL * -FIX(0.22800))
        pmulhw  xmm7,[rel PW_F0402]     ; xmm7=(2*CrH * FIX(0.40200))
        pmulhw  xmm0,[rel PW_F0402]     ; xmm0=(2*CrL * FIX(0.40200))

        paddw   xmm6,[rel PW_ONE]
        paddw   xmm4,[rel PW_ONE]
        psraw   xmm6,1                  ; xmm6=(CbH * -FIX(0.22800))
        psraw   xmm4,1                  ; xmm4=(CbL * -FIX(0.22800))
        paddw   xmm7,[rel PW_ONE]
        paddw   xmm0,[rel PW_ONE]
        psraw   xmm7,1                  ; xmm7=(CrH * FIX(0.40200))
        psraw   xmm0,1                  ; xmm0=(CrL * FIX(0.40200))
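; pmulhw returns the high 16 bits of a signed 16x16 multiply, i.e. a
; truncated (x * FIX(c)) >> 16.  Multiplying by 2*Cb/2*Cr instead keeps
; one extra bit of the product, and the "add PW_ONE, then arithmetic
; shift right by 1" turns truncation into round-to-nearest; roughly,
; per word:
;
;   int prod    = (2 * cb * -FIX(0.22800)) >> 16;   /* pmulhw      */
;   int rounded = (prod + 1) >> 1;                  /* ~ -0.228*cb */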

        paddw   xmm6,xmm5
        paddw   xmm4,xmm2
        paddw   xmm6,xmm5               ; xmm6=(CbH * FIX(1.77200))=(B-Y)H
        paddw   xmm4,xmm2               ; xmm4=(CbL * FIX(1.77200))=(B-Y)L
        paddw   xmm7,xmm1               ; xmm7=(CrH * FIX(1.40200))=(R-Y)H
        paddw   xmm0,xmm3               ; xmm0=(CrL * FIX(1.40200))=(R-Y)L

        movdqa  XMMWORD [wk(0)], xmm6   ; wk(0)=(B-Y)H
        movdqa  XMMWORD [wk(1)], xmm7   ; wk(1)=(R-Y)H

        movdqa    xmm6,xmm5
        movdqa    xmm7,xmm2
        punpcklwd xmm5,xmm1
        punpckhwd xmm6,xmm1
        pmaddwd   xmm5,[rel PW_MF0344_F0285]
        pmaddwd   xmm6,[rel PW_MF0344_F0285]
        punpcklwd xmm2,xmm3
        punpckhwd xmm7,xmm3
        pmaddwd   xmm2,[rel PW_MF0344_F0285]
        pmaddwd   xmm7,[rel PW_MF0344_F0285]

        paddd   xmm5,[rel PD_ONEHALF]
        paddd   xmm6,[rel PD_ONEHALF]
        psrad   xmm5,SCALEBITS
        psrad   xmm6,SCALEBITS
        paddd   xmm2,[rel PD_ONEHALF]
        paddd   xmm7,[rel PD_ONEHALF]
        psrad   xmm2,SCALEBITS
        psrad   xmm7,SCALEBITS

        packssdw  xmm5,xmm6             ; xmm5=CbH*-FIX(0.344)+CrH*FIX(0.285)
        packssdw  xmm2,xmm7             ; xmm2=CbL*-FIX(0.344)+CrL*FIX(0.285)
        psubw     xmm5,xmm1             ; xmm5=CbH*-FIX(0.344)+CrH*-FIX(0.714)=(G-Y)H
        psubw     xmm2,xmm3             ; xmm2=CbL*-FIX(0.344)+CrL*-FIX(0.714)=(G-Y)L
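; PW_MF0344_F0285 interleaves -FIX(0.34414) with FIX(0.28586), so once
; punpck{l,h}wd has paired each Cb word with its Cr word, a single
; pmaddwd produces both products and their 32-bit sum.  Sketch:
;
;   long g = (long) cb * -FIX(0.34414) + (long) cr * FIX(0.28586);
;   g = (g + (1L << (SCALEBITS-1))) >> SCALEBITS;   /* PD_ONEHALF  */
;   g -= cr;                                        /* 0.28586 - 1 */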

        movdqa  XMMWORD [wk(2)], xmm5   ; wk(2)=(G-Y)H

        mov     al,2                    ; Yctr
        jmp     short .Yloop_1st

.Yloop_2nd:
        movdqa  xmm0, XMMWORD [wk(1)]   ; xmm0=(R-Y)H
        movdqa  xmm2, XMMWORD [wk(2)]   ; xmm2=(G-Y)H
        movdqa  xmm4, XMMWORD [wk(0)]   ; xmm4=(B-Y)H

.Yloop_1st:
        movdqa  xmm7, XMMWORD [rsi]     ; xmm7=Y(0123456789ABCDEF)

        pcmpeqw xmm6,xmm6
        psrlw   xmm6,BYTE_BIT           ; xmm6={0xFF 0x00 0xFF 0x00 ..}
        pand    xmm6,xmm7               ; xmm6=Y(02468ACE)=YE
        psrlw   xmm7,BYTE_BIT           ; xmm7=Y(13579BDF)=YO
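; One 16-byte load covers 16 Y samples; the word mask 0x00FF and psrlw
; then split them into even- and odd-indexed pixels, already widened to
; 16-bit words.  Per 16-bit unit, conceptually (x86 is little-endian):
;
;   unsigned short w  = ((unsigned short *) inptr0)[i];
;   int            ye = w & 0xFF;        /* Y(0), Y(2), ... */
;   int            yo = w >> 8;          /* Y(1), Y(3), ... */
;
; Each Cb/Cr sample is shared by one even/odd Y pair -- this is the
; "merged" 2:1 horizontal upsampling.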

        movdqa  xmm1,xmm0               ; xmm1=xmm0=(R-Y)(L/H)
        movdqa  xmm3,xmm2               ; xmm3=xmm2=(G-Y)(L/H)
        movdqa  xmm5,xmm4               ; xmm5=xmm4=(B-Y)(L/H)

        paddw     xmm0,xmm6             ; xmm0=((R-Y)+YE)=RE=R(02468ACE)
        paddw     xmm1,xmm7             ; xmm1=((R-Y)+YO)=RO=R(13579BDF)
        packuswb  xmm0,xmm0             ; xmm0=R(02468ACE********)
        packuswb  xmm1,xmm1             ; xmm1=R(13579BDF********)

        paddw     xmm2,xmm6             ; xmm2=((G-Y)+YE)=GE=G(02468ACE)
        paddw     xmm3,xmm7             ; xmm3=((G-Y)+YO)=GO=G(13579BDF)
        packuswb  xmm2,xmm2             ; xmm2=G(02468ACE********)
        packuswb  xmm3,xmm3             ; xmm3=G(13579BDF********)

        paddw     xmm4,xmm6             ; xmm4=((B-Y)+YE)=BE=B(02468ACE)
        paddw     xmm5,xmm7             ; xmm5=((B-Y)+YO)=BO=B(13579BDF)
        packuswb  xmm4,xmm4             ; xmm4=B(02468ACE********)
        packuswb  xmm5,xmm5             ; xmm5=B(13579BDF********)
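; packuswb saturates each signed 16-bit result into 0..255, serving the
; same purpose as the range-limiting step of the scalar upsampler;
; roughly, per sample:
;
;   unsigned char r = (re < 0) ? 0 : (re > 255) ? 255 : (unsigned char) re;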

%if RGB_PIXELSIZE == 3 ; ---------------

        ; xmmA=(00 02 04 06 08 0A 0C 0E **), xmmB=(01 03 05 07 09 0B 0D 0F **)
        ; xmmC=(10 12 14 16 18 1A 1C 1E **), xmmD=(11 13 15 17 19 1B 1D 1F **)
        ; xmmE=(20 22 24 26 28 2A 2C 2E **), xmmF=(21 23 25 27 29 2B 2D 2F **)
        ; xmmG=(** ** ** ** ** ** ** ** **), xmmH=(** ** ** ** ** ** ** ** **)

        punpcklbw xmmA,xmmC     ; xmmA=(00 10 02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E)
        punpcklbw xmmE,xmmB     ; xmmE=(20 01 22 03 24 05 26 07 28 09 2A 0B 2C 0D 2E 0F)
        punpcklbw xmmD,xmmF     ; xmmD=(11 21 13 23 15 25 17 27 19 29 1B 2B 1D 2D 1F 2F)

        movdqa    xmmG,xmmA
        movdqa    xmmH,xmmA
        punpcklwd xmmA,xmmE     ; xmmA=(00 10 20 01 02 12 22 03 04 14 24 05 06 16 26 07)
        punpckhwd xmmG,xmmE     ; xmmG=(08 18 28 09 0A 1A 2A 0B 0C 1C 2C 0D 0E 1E 2E 0F)

        psrldq    xmmH,2        ; xmmH=(02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E -- --)
        psrldq    xmmE,2        ; xmmE=(22 03 24 05 26 07 28 09 2A 0B 2C 0D 2E 0F -- --)

        movdqa    xmmC,xmmD
        movdqa    xmmB,xmmD
        punpcklwd xmmD,xmmH     ; xmmD=(11 21 02 12 13 23 04 14 15 25 06 16 17 27 08 18)
        punpckhwd xmmC,xmmH     ; xmmC=(19 29 0A 1A 1B 2B 0C 1C 1D 2D 0E 1E 1F 2F -- --)

        psrldq    xmmB,2        ; xmmB=(13 23 15 25 17 27 19 29 1B 2B 1D 2D 1F 2F -- --)

        movdqa    xmmF,xmmE
        punpcklwd xmmE,xmmB     ; xmmE=(22 03 13 23 24 05 15 25 26 07 17 27 28 09 19 29)
        punpckhwd xmmF,xmmB     ; xmmF=(2A 0B 1B 2B 2C 0D 1D 2D 2E 0F 1F 2F -- -- -- --)

        pshufd    xmmH,xmmA,0x4E ; xmmH=(04 14 24 05 06 16 26 07 00 10 20 01 02 12 22 03)
        movdqa    xmmB,xmmE
        punpckldq xmmA,xmmD     ; xmmA=(00 10 20 01 11 21 02 12 02 12 22 03 13 23 04 14)
        punpckldq xmmE,xmmH     ; xmmE=(22 03 13 23 04 14 24 05 24 05 15 25 06 16 26 07)
        punpckhdq xmmD,xmmB     ; xmmD=(15 25 06 16 26 07 17 27 17 27 08 18 28 09 19 29)

        pshufd    xmmH,xmmG,0x4E ; xmmH=(0C 1C 2C 0D 0E 1E 2E 0F 08 18 28 09 0A 1A 2A 0B)
        movdqa    xmmB,xmmF
        punpckldq xmmG,xmmC     ; xmmG=(08 18 28 09 19 29 0A 1A 0A 1A 2A 0B 1B 2B 0C 1C)
        punpckldq xmmF,xmmH     ; xmmF=(2A 0B 1B 2B 0C 1C 2C 0D 2C 0D 1D 2D 0E 1E 2E 0F)
        punpckhdq xmmC,xmmB     ; xmmC=(1D 2D 0E 1E 2E 0F 1F 2F 1F 2F -- -- -- -- -- --)

        punpcklqdq xmmA,xmmE    ; xmmA=(00 10 20 01 11 21 02 12 22 03 13 23 04 14 24 05)
        punpcklqdq xmmD,xmmG    ; xmmD=(15 25 06 16 26 07 17 27 08 18 28 09 19 29 0A 1A)
        punpcklqdq xmmF,xmmC    ; xmmF=(2A 0B 1B 2B 0C 1C 2C 0D 1D 2D 0E 1E 2E 0F 1F 2F)
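; The shuffle network above interleaves the six planar even/odd registers
; into packed 3-byte pixels; the goal, expressed in C, is simply:
;
;   for (i = 0; i < 16; i++) {
;     outptr[RGB_PIXELSIZE*i + RGB_RED]   = R[i];
;     outptr[RGB_PIXELSIZE*i + RGB_GREEN] = G[i];
;     outptr[RGB_PIXELSIZE*i + RGB_BLUE]  = B[i];
;   }
;
; so that xmmA/xmmD/xmmF can be stored as three consecutive xmmwords
; (48 bytes == 16 pixels).  The component order follows the RGB_*
; layout selected in jcolsamp.inc.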

        cmp     rcx, byte SIZEOF_XMMWORD
        jb      short .column_st32

        test    rdi, SIZEOF_XMMWORD-1
        jnz     short .out1
        ; --(aligned)-------------------
        movntdq XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
        movntdq XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
        movntdq XMMWORD [rdi+2*SIZEOF_XMMWORD], xmmF
        jmp     short .out0
.out1:  ; --(unaligned)-----------------
        movdqu  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
        movdqu  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
        movdqu  XMMWORD [rdi+2*SIZEOF_XMMWORD], xmmF
.out0:
        add     rdi, byte RGB_PIXELSIZE*SIZEOF_XMMWORD  ; outptr
        sub     rcx, byte SIZEOF_XMMWORD
        jz      near .endcolumn
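; movntdq is a non-temporal (cache-bypassing) store and requires a
; 16-byte-aligned address, hence the runtime test
;
;   int aligned = (((uintptr_t) outptr & (SIZEOF_XMMWORD - 1)) == 0);
;
; that selects it over plain movdqu.  Streaming stores suit a decoded
; RGB buffer that will not be re-read soon; the sfence at .endcolumn
; makes them globally visible before the function returns.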

        add     rsi, byte SIZEOF_XMMWORD        ; inptr0
        dec     al                              ; Yctr
        jnz     near .Yloop_2nd

        add     rbx, byte SIZEOF_XMMWORD        ; inptr1
        add     rdx, byte SIZEOF_XMMWORD        ; inptr2
        jmp     near .columnloop

.column_st32:
        lea     rcx, [rcx+rcx*2]                ; imul ecx, RGB_PIXELSIZE
        cmp     rcx, byte 2*SIZEOF_XMMWORD
        jb      short .column_st16
        movdqu  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
        movdqu  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
        add     rdi, byte 2*SIZEOF_XMMWORD      ; outptr
        movdqa  xmmA,xmmF
        sub     rcx, byte 2*SIZEOF_XMMWORD
        jmp     short .column_st15
.column_st16:
        cmp     rcx, byte SIZEOF_XMMWORD
        jb      short .column_st15
        movdqu  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
        add     rdi, byte SIZEOF_XMMWORD        ; outptr
        movdqa  xmmA,xmmD
        sub     rcx, byte SIZEOF_XMMWORD
.column_st15:
        ; Store the lower 8 bytes of xmmA to the output when it has enough
        ; space.
        cmp     rcx, byte SIZEOF_MMWORD
        jb      short .column_st7
        movq    XMM_MMWORD [rdi], xmmA
        add     rdi, byte SIZEOF_MMWORD
        sub     rcx, byte SIZEOF_MMWORD
        psrldq  xmmA, SIZEOF_MMWORD
.column_st7:
        ; Store the lower 4 bytes of xmmA to the output when it has enough
        ; space.
        cmp     rcx, byte SIZEOF_DWORD
        jb      short .column_st3
        movd    XMM_DWORD [rdi], xmmA
        add     rdi, byte SIZEOF_DWORD
        sub     rcx, byte SIZEOF_DWORD
        psrldq  xmmA, SIZEOF_DWORD
.column_st3:
        ; Store the lower 2 bytes of rax to the output when it has enough
        ; space.
        movd    eax, xmmA
        cmp     rcx, byte SIZEOF_WORD
        jb      short .column_st1
        mov     WORD [rdi], ax
        add     rdi, byte SIZEOF_WORD
        sub     rcx, byte SIZEOF_WORD
        shr     rax, 16
.column_st1:
        ; Store the lower 1 byte of rax to the output when it has enough
        ; space.
        test    rcx, rcx
        jz      short .endcolumn
        mov     BYTE [rdi], al
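; The .column_st* chain finishes a partial group by binary decomposition
; of the remaining byte count in rcx, shifting stored bytes out of
; xmmA/rax as it goes.  With hypothetical store helpers, the shape is:
;
;   if (left >= 8) { store8(p); p += 8; left -= 8; }   /* movq     */
;   if (left >= 4) { store4(p); p += 4; left -= 4; }   /* movd     */
;   if (left >= 2) { store2(p); p += 2; left -= 2; }   /* mov WORD */
;   if (left)      { store1(p); }                      /* mov BYTE */
;
; so the routine never writes past output_width * RGB_PIXELSIZE bytes.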

%else ; RGB_PIXELSIZE == 4 ; -----------

%ifdef RGBX_FILLER_0XFF
        pcmpeqb xmm6,xmm6               ; xmm6=XE=X(02468ACE********)
        pcmpeqb xmm7,xmm7               ; xmm7=XO=X(13579BDF********)
%else
        pxor    xmm6,xmm6               ; xmm6=XE=X(02468ACE********)
        pxor    xmm7,xmm7               ; xmm7=XO=X(13579BDF********)
%endif
        ; xmmA=(00 02 04 06 08 0A 0C 0E **), xmmB=(01 03 05 07 09 0B 0D 0F **)
        ; xmmC=(10 12 14 16 18 1A 1C 1E **), xmmD=(11 13 15 17 19 1B 1D 1F **)
        ; xmmE=(20 22 24 26 28 2A 2C 2E **), xmmF=(21 23 25 27 29 2B 2D 2F **)
        ; xmmG=(30 32 34 36 38 3A 3C 3E **), xmmH=(31 33 35 37 39 3B 3D 3F **)

        punpcklbw xmmA,xmmC     ; xmmA=(00 10 02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E)
        punpcklbw xmmE,xmmG     ; xmmE=(20 30 22 32 24 34 26 36 28 38 2A 3A 2C 3C 2E 3E)
        punpcklbw xmmB,xmmD     ; xmmB=(01 11 03 13 05 15 07 17 09 19 0B 1B 0D 1D 0F 1F)
        punpcklbw xmmF,xmmH     ; xmmF=(21 31 23 33 25 35 27 37 29 39 2B 3B 2D 3D 2F 3F)

        movdqa    xmmC,xmmA
        punpcklwd xmmA,xmmE     ; xmmA=(00 10 20 30 02 12 22 32 04 14 24 34 06 16 26 36)
        punpckhwd xmmC,xmmE     ; xmmC=(08 18 28 38 0A 1A 2A 3A 0C 1C 2C 3C 0E 1E 2E 3E)
        movdqa    xmmG,xmmB
        punpcklwd xmmB,xmmF     ; xmmB=(01 11 21 31 03 13 23 33 05 15 25 35 07 17 27 37)
        punpckhwd xmmG,xmmF     ; xmmG=(09 19 29 39 0B 1B 2B 3B 0D 1D 2D 3D 0F 1F 2F 3F)

        movdqa    xmmD,xmmA
        punpckldq xmmA,xmmB     ; xmmA=(00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33)
        punpckhdq xmmD,xmmB     ; xmmD=(04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37)
        movdqa    xmmH,xmmC
        punpckldq xmmC,xmmG     ; xmmC=(08 18 28 38 09 19 29 39 0A 1A 2A 3A 0B 1B 2B 3B)
        punpckhdq xmmH,xmmG     ; xmmH=(0C 1C 2C 3C 0D 1D 2D 3D 0E 1E 2E 3E 0F 1F 2F 3F)
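; For 4-byte pixels the three punpck rounds above amount to a byte-wise
; 4x4 transpose: the R, G, B and filler bytes of each pixel i become
; adjacent, leaving xmmA/xmmD/xmmC/xmmH as 16 consecutive 32-bit pixels,
; e.g. (one possible layout):
;
;   uint32_t px = r | (g << 8) | (b << 16) | (x << 24);
;
; The actual byte order again follows the RGB_* mapping in jcolsamp.inc.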

        cmp     rcx, byte SIZEOF_XMMWORD
        jb      short .column_st32

        test    rdi, SIZEOF_XMMWORD-1
        jnz     short .out1
        ; --(aligned)-------------------
        movntdq XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
        movntdq XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
        movntdq XMMWORD [rdi+2*SIZEOF_XMMWORD], xmmC
        movntdq XMMWORD [rdi+3*SIZEOF_XMMWORD], xmmH
        jmp     short .out0
.out1:  ; --(unaligned)-----------------
        movdqu  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
        movdqu  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
        movdqu  XMMWORD [rdi+2*SIZEOF_XMMWORD], xmmC
        movdqu  XMMWORD [rdi+3*SIZEOF_XMMWORD], xmmH
.out0:
        add     rdi, byte RGB_PIXELSIZE*SIZEOF_XMMWORD  ; outptr
        sub     rcx, byte SIZEOF_XMMWORD
        jz      near .endcolumn

        add     rsi, byte SIZEOF_XMMWORD        ; inptr0
        dec     al                              ; Yctr
        jnz     near .Yloop_2nd

        add     rbx, byte SIZEOF_XMMWORD        ; inptr1
        add     rdx, byte SIZEOF_XMMWORD        ; inptr2
        jmp     near .columnloop

.column_st32:
        cmp     rcx, byte SIZEOF_XMMWORD/2
        jb      short .column_st16
        movdqu  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
        movdqu  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
        add     rdi, byte 2*SIZEOF_XMMWORD      ; outptr
        movdqa  xmmA,xmmC
        movdqa  xmmD,xmmH
        sub     rcx, byte SIZEOF_XMMWORD/2
.column_st16:
        cmp     rcx, byte SIZEOF_XMMWORD/4
        jb      short .column_st15
        movdqu  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
        add     rdi, byte SIZEOF_XMMWORD        ; outptr
        movdqa  xmmA,xmmD
        sub     rcx, byte SIZEOF_XMMWORD/4
.column_st15:
        ; Store two pixels (8 bytes) of xmmA to the output when it has enough
        ; space.
        cmp     rcx, byte SIZEOF_XMMWORD/8
        jb      short .column_st7
        movq    XMM_MMWORD [rdi], xmmA
        add     rdi, byte SIZEOF_XMMWORD/8*4
        sub     rcx, byte SIZEOF_XMMWORD/8
        psrldq  xmmA, SIZEOF_XMMWORD/8*4
.column_st7:
        ; Store one pixel (4 bytes) of xmmA to the output when it has enough
        ; space.
        test    rcx, rcx
        jz      short .endcolumn
        movd    XMM_DWORD [rdi], xmmA

%endif ; RGB_PIXELSIZE ; ---------------

.endcolumn:
        sfence          ; flush the write buffer

.return:
        pop     rbx
        uncollect_args
        mov     rsp,rbp         ; rsp <- aligned rbp
        pop     rsp             ; rsp <- original rbp
        pop     rbp
        ret

; --------------------------------------------------------------------------
;
; Upsample and color convert for the case of 2:1 horizontal and 2:1 vertical.
;
; GLOBAL(void)
; jsimd_h2v2_merged_upsample_sse2 (JDIMENSION output_width,
;                                  JSAMPIMAGE input_buf,
;                                  JDIMENSION in_row_group_ctr,
;                                  JSAMPARRAY output_buf);
;

; r10 = JDIMENSION output_width
; r11 = JSAMPIMAGE input_buf
; r12 = JDIMENSION in_row_group_ctr
; r13 = JSAMPARRAY output_buf

        align   16
        global  EXTN(jsimd_h2v2_merged_upsample_sse2)

EXTN(jsimd_h2v2_merged_upsample_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
        collect_args
        push    rbx

        mov     rax, r10

        mov     rdi, r11
        mov     rcx, r12
        mov     rsi, JSAMPARRAY [rdi+0*SIZEOF_JSAMPARRAY]
        mov     rbx, JSAMPARRAY [rdi+1*SIZEOF_JSAMPARRAY]
        mov     rdx, JSAMPARRAY [rdi+2*SIZEOF_JSAMPARRAY]
        mov     rdi, r13
        lea     rsi, [rsi+rcx*SIZEOF_JSAMPROW]

        push    rdx             ; inptr2
        push    rbx             ; inptr1
        push    rsi             ; inptr00
        mov     rbx,rsp
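; The three pushes build a makeshift three-entry JSAMPIMAGE on the
; stack, with the Y entry pre-advanced by in_row_group_ctr; rbx (== rsp)
; is then handed to the h2v1 routine as input_buf.  Roughly:
;
;   JSAMPARRAY fake[3] = { Ysrc + ctr, Cbsrc, Crsrc };  /* on the stack */
;
; When jsimd_h2v1_merged_upsample_sse2 indexes the Y entry by ctr again,
; it therefore reads Y row 2*ctr -- the first of the two rows in this
; row group -- while Cb/Cr are indexed once, as h2v2 sampling requires.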

        push    rdi
        push    rcx
        push    rax

%ifdef WIN64
        mov     r8, rcx
        mov     r9, rdi
        mov     rcx, rax
        mov     rdx, rbx
%else
        mov     rdx, rcx
        mov     rcx, rdi
        mov     rdi, rax
        mov     rsi, rbx
%endif
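; The width/input_buf/ctr/output_buf values (currently held in
; rax/rbx/rcx/rdi) are moved into the argument registers that the nested
; call's own collect_args (jsimdext.inc) expects under either ABI:
;
;   SysV:  rdi = output_width, rsi = input_buf, rdx = ctr, rcx = output_buf
;   Win64: rcx = output_width, rdx = input_buf, r8  = ctr, r9  = output_buf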

        call    EXTN(jsimd_h2v1_merged_upsample_sse2)

        pop     rax
        pop     rcx
        pop     rdi
        pop     rsi
        pop     rbx
        pop     rdx

        add     rdi, byte SIZEOF_JSAMPROW       ; outptr1
        add     rsi, byte SIZEOF_JSAMPROW       ; inptr01
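; Second pass: the output row pointer and the saved Y row pointer each
; advance by one JSAMPROW, so the call below converts Y row 2*ctr+1 into
; output row 1 while reusing the same Cb/Cr rows.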

        push    rdx             ; inptr2
        push    rbx             ; inptr1
        push    rsi             ; inptr00
        mov     rbx,rsp

        push    rdi
        push    rcx
        push    rax

%ifdef WIN64
        mov     r8, rcx
        mov     r9, rdi
        mov     rcx, rax
        mov     rdx, rbx
%else
        mov     rdx, rcx
        mov     rcx, rdi
        mov     rdi, rax
        mov     rsi, rbx
%endif

        call    EXTN(jsimd_h2v1_merged_upsample_sse2)

        pop     rax
        pop     rcx
        pop     rdi
        pop     rsi
        pop     rbx
        pop     rdx

        pop     rbx
        uncollect_args
        pop     rbp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align   16