Thu, 22 Jan 2015 13:21:57 +0100
Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
michael@0 | 1 | ; |
michael@0 | 2 | ; jdclrss2-64.asm - colorspace conversion (64-bit SSE2) |
michael@0 | 3 | ; |
michael@0 | 4 | ; Copyright 2009, 2012 Pierre Ossman <ossman@cendio.se> for Cendio AB |
michael@0 | 5 | ; Copyright 2009, 2012 D. R. Commander |
michael@0 | 6 | ; |
michael@0 | 7 | ; Based on |
michael@0 | 8 | ; x86 SIMD extension for IJG JPEG library |
michael@0 | 9 | ; Copyright (C) 1999-2006, MIYASAKA Masaru. |
michael@0 | 10 | ; For conditions of distribution and use, see copyright notice in jsimdext.inc |
michael@0 | 11 | ; |
michael@0 | 12 | ; This file should be assembled with NASM (Netwide Assembler), |
michael@0 | 13 | ; can *not* be assembled with Microsoft's MASM or any compatible |
michael@0 | 14 | ; assembler (including Borland's Turbo Assembler). |
michael@0 | 15 | ; NASM is available from http://nasm.sourceforge.net/ or |
michael@0 | 16 | ; http://sourceforge.net/project/showfiles.php?group_id=6208 |
michael@0 | 17 | ; |
michael@0 | 18 | ; [TAB8] |
michael@0 | 19 | |
michael@0 | 20 | %include "jcolsamp.inc" |
michael@0 | 21 | |
michael@0 | 22 | ; -------------------------------------------------------------------------- |
michael@0 | 23 | ; |
michael@0 | 24 | ; Convert some rows of samples to the output colorspace. |
michael@0 | 25 | ; |
michael@0 | 26 | ; GLOBAL(void) |
michael@0 | 27 | ; jsimd_ycc_rgb_convert_sse2 (JDIMENSION out_width, |
michael@0 | 28 | ; JSAMPIMAGE input_buf, JDIMENSION input_row, |
michael@0 | 29 | ; JSAMPARRAY output_buf, int num_rows) |
michael@0 | 30 | ; |
michael@0 | 31 | |
michael@0 | 32 | ; r10 = JDIMENSION out_width |
michael@0 | 33 | ; r11 = JSAMPIMAGE input_buf |
michael@0 | 34 | ; r12 = JDIMENSION input_row |
michael@0 | 35 | ; r13 = JSAMPARRAY output_buf |
michael@0 | 36 | ; r14 = int num_rows |
michael@0 | 37 | |
michael@0 | 38 | %define wk(i) rbp-(WK_NUM-(i))*SIZEOF_XMMWORD ; xmmword wk[WK_NUM]
michael@0 | 39 | %define WK_NUM 2
michael@0 | 40 |
michael@0 | 41 | align 16
michael@0 | 42 | global EXTN(jsimd_ycc_rgb_convert_sse2)
michael@0 | 43 |
michael@0 | 44 | EXTN(jsimd_ycc_rgb_convert_sse2):
michael@0 | 45 | push rbp
michael@0 | 46 | mov rax,rsp ; rax = original rbp
michael@0 | 47 | sub rsp, byte 4 ; NOTE(review): with the align below, leaves room to save rax at [rsp]
michael@0 | 48 | and rsp, byte (-SIZEOF_XMMWORD) ; align to 128 bits
michael@0 | 49 | mov [rsp],rax ; save original stack pointer for the epilogue's pop rsp
michael@0 | 50 | mov rbp,rsp ; rbp = aligned rbp
michael@0 | 51 | lea rsp, [wk(0)] ; reserve WK_NUM aligned xmmword work slots
michael@0 | 52 | collect_args ; load arguments into r10-r14 (see register map above)
michael@0 | 53 | push rbx ; rbx is callee-saved; used as inptr1 below
michael@0 | 54 |
michael@0 | 55 | mov rcx, r10 ; num_cols
michael@0 | 56 | test rcx,rcx
michael@0 | 57 | jz near .return ; nothing to do for zero-width rows
michael@0 | 58 |
michael@0 | 59 | push rcx ; preserve num_cols while indexing input_buf
michael@0 | 60 |
michael@0 | 61 | mov rdi, r11 ; input_buf
michael@0 | 62 | mov rcx, r12 ; input_row
michael@0 | 63 | mov rsi, JSAMPARRAY [rdi+0*SIZEOF_JSAMPARRAY] ; Y plane rows
michael@0 | 64 | mov rbx, JSAMPARRAY [rdi+1*SIZEOF_JSAMPARRAY] ; Cb plane rows
michael@0 | 65 | mov rdx, JSAMPARRAY [rdi+2*SIZEOF_JSAMPARRAY] ; Cr plane rows
michael@0 | 66 | lea rsi, [rsi+rcx*SIZEOF_JSAMPROW] ; advance to input_row
michael@0 | 67 | lea rbx, [rbx+rcx*SIZEOF_JSAMPROW]
michael@0 | 68 | lea rdx, [rdx+rcx*SIZEOF_JSAMPROW]
michael@0 | 69 |
michael@0 | 70 | pop rcx ; restore num_cols
michael@0 | 71 |
michael@0 | 72 | mov rdi, r13 ; output_buf
michael@0 | 73 | mov eax, r14d ; num_rows (zero-extended into rax; assumes non-negative int)
michael@0 | 74 | test rax,rax
michael@0 | 75 | jle near .return
michael@0 | 76 | .rowloop:
michael@0 | 77 | push rax ; save per-row state (restored at .nextrow)
michael@0 | 78 | push rdi
michael@0 | 79 | push rdx
michael@0 | 80 | push rbx
michael@0 | 81 | push rsi
michael@0 | 82 | push rcx ; col
michael@0 | 83 |
michael@0 | 84 | mov rsi, JSAMPROW [rsi] ; inptr0
michael@0 | 85 | mov rbx, JSAMPROW [rbx] ; inptr1
michael@0 | 86 | mov rdx, JSAMPROW [rdx] ; inptr2
michael@0 | 87 | mov rdi, JSAMPROW [rdi] ; outptr
michael@0 | 88 | .columnloop:
michael@0 | 89 |
michael@0 | 90 | movdqa xmm5, XMMWORD [rbx] ; xmm5=Cb(0123456789ABCDEF)
michael@0 | 91 | movdqa xmm1, XMMWORD [rdx] ; xmm1=Cr(0123456789ABCDEF)
michael@0 | 92 |
michael@0 | 93 | pcmpeqw xmm4,xmm4 ; xmm4=all ones
michael@0 | 94 | pcmpeqw xmm7,xmm7 ; xmm7=all ones
michael@0 | 95 | psrlw xmm4,BYTE_BIT ; xmm4={0xFF 0x00 0xFF 0x00 ..}
michael@0 | 96 | psllw xmm7,7 ; xmm7={0xFF80 0xFF80 0xFF80 0xFF80 ..}
michael@0 | 97 | movdqa xmm0,xmm4 ; xmm0=xmm4={0xFF 0x00 0xFF 0x00 ..}
michael@0 | 98 |
michael@0 | 99 | pand xmm4,xmm5 ; xmm4=Cb(02468ACE)=CbE
michael@0 | 100 | psrlw xmm5,BYTE_BIT ; xmm5=Cb(13579BDF)=CbO
michael@0 | 101 | pand xmm0,xmm1 ; xmm0=Cr(02468ACE)=CrE
michael@0 | 102 | psrlw xmm1,BYTE_BIT ; xmm1=Cr(13579BDF)=CrO
michael@0 | 103 |
michael@0 | 104 | paddw xmm4,xmm7 ; recenter: CbE -= 128 (0xFF80 added as signed words)
michael@0 | 105 | paddw xmm5,xmm7 ; CbO -= 128
michael@0 | 106 | paddw xmm0,xmm7 ; CrE -= 128
michael@0 | 107 | paddw xmm1,xmm7 ; CrO -= 128
michael@0 | 108 |
michael@0 | 109 | ; (Original)
michael@0 | 110 | ; R = Y + 1.40200 * Cr
michael@0 | 111 | ; G = Y - 0.34414 * Cb - 0.71414 * Cr
michael@0 | 112 | ; B = Y + 1.77200 * Cb
michael@0 | 113 | ;
michael@0 | 114 | ; (This implementation)
michael@0 | 115 | ; R = Y + 0.40200 * Cr + Cr
michael@0 | 116 | ; G = Y - 0.34414 * Cb + 0.28586 * Cr - Cr
michael@0 | 117 | ; B = Y - 0.22800 * Cb + Cb + Cb
michael@0 | 118 |
michael@0 | 119 | movdqa xmm2,xmm4 ; xmm2=CbE
michael@0 | 120 | movdqa xmm3,xmm5 ; xmm3=CbO
michael@0 | 121 | paddw xmm4,xmm4 ; xmm4=2*CbE
michael@0 | 122 | paddw xmm5,xmm5 ; xmm5=2*CbO
michael@0 | 123 | movdqa xmm6,xmm0 ; xmm6=CrE
michael@0 | 124 | movdqa xmm7,xmm1 ; xmm7=CrO
michael@0 | 125 | paddw xmm0,xmm0 ; xmm0=2*CrE
michael@0 | 126 | paddw xmm1,xmm1 ; xmm1=2*CrO
michael@0 | 127 |
michael@0 | 128 | pmulhw xmm4,[rel PW_MF0228] ; xmm4=(2*CbE * -FIX(0.22800))
michael@0 | 129 | pmulhw xmm5,[rel PW_MF0228] ; xmm5=(2*CbO * -FIX(0.22800))
michael@0 | 130 | pmulhw xmm0,[rel PW_F0402] ; xmm0=(2*CrE * FIX(0.40200))
michael@0 | 131 | pmulhw xmm1,[rel PW_F0402] ; xmm1=(2*CrO * FIX(0.40200))
michael@0 | 132 |
michael@0 | 133 | paddw xmm4,[rel PW_ONE] ; +1 then >>1: round to nearest
michael@0 | 134 | paddw xmm5,[rel PW_ONE]
michael@0 | 135 | psraw xmm4,1 ; xmm4=(CbE * -FIX(0.22800))
michael@0 | 136 | psraw xmm5,1 ; xmm5=(CbO * -FIX(0.22800))
michael@0 | 137 | paddw xmm0,[rel PW_ONE]
michael@0 | 138 | paddw xmm1,[rel PW_ONE]
michael@0 | 139 | psraw xmm0,1 ; xmm0=(CrE * FIX(0.40200))
michael@0 | 140 | psraw xmm1,1 ; xmm1=(CrO * FIX(0.40200))
michael@0 | 141 |
michael@0 | 142 | paddw xmm4,xmm2
michael@0 | 143 | paddw xmm5,xmm3
michael@0 | 144 | paddw xmm4,xmm2 ; xmm4=(CbE * FIX(1.77200))=(B-Y)E
michael@0 | 145 | paddw xmm5,xmm3 ; xmm5=(CbO * FIX(1.77200))=(B-Y)O
michael@0 | 146 | paddw xmm0,xmm6 ; xmm0=(CrE * FIX(1.40200))=(R-Y)E
michael@0 | 147 | paddw xmm1,xmm7 ; xmm1=(CrO * FIX(1.40200))=(R-Y)O
michael@0 | 148 |
michael@0 | 149 | movdqa XMMWORD [wk(0)], xmm4 ; wk(0)=(B-Y)E
michael@0 | 150 | movdqa XMMWORD [wk(1)], xmm5 ; wk(1)=(B-Y)O
michael@0 | 151 |
michael@0 | 152 | movdqa xmm4,xmm2
michael@0 | 153 | movdqa xmm5,xmm3
michael@0 | 154 | punpcklwd xmm2,xmm6 ; interleave Cb/Cr word pairs for pmaddwd
michael@0 | 155 | punpckhwd xmm4,xmm6
michael@0 | 156 | pmaddwd xmm2,[rel PW_MF0344_F0285]
michael@0 | 157 | pmaddwd xmm4,[rel PW_MF0344_F0285]
michael@0 | 158 | punpcklwd xmm3,xmm7
michael@0 | 159 | punpckhwd xmm5,xmm7
michael@0 | 160 | pmaddwd xmm3,[rel PW_MF0344_F0285]
michael@0 | 161 | pmaddwd xmm5,[rel PW_MF0344_F0285]
michael@0 | 162 |
michael@0 | 163 | paddd xmm2,[rel PD_ONEHALF] ; round before descaling
michael@0 | 164 | paddd xmm4,[rel PD_ONEHALF]
michael@0 | 165 | psrad xmm2,SCALEBITS
michael@0 | 166 | psrad xmm4,SCALEBITS
michael@0 | 167 | paddd xmm3,[rel PD_ONEHALF]
michael@0 | 168 | paddd xmm5,[rel PD_ONEHALF]
michael@0 | 169 | psrad xmm3,SCALEBITS
michael@0 | 170 | psrad xmm5,SCALEBITS
michael@0 | 171 |
michael@0 | 172 | packssdw xmm2,xmm4 ; xmm2=CbE*-FIX(0.344)+CrE*FIX(0.285)
michael@0 | 173 | packssdw xmm3,xmm5 ; xmm3=CbO*-FIX(0.344)+CrO*FIX(0.285)
michael@0 | 174 | psubw xmm2,xmm6 ; xmm2=CbE*-FIX(0.344)+CrE*-FIX(0.714)=(G-Y)E
michael@0 | 175 | psubw xmm3,xmm7 ; xmm3=CbO*-FIX(0.344)+CrO*-FIX(0.714)=(G-Y)O
michael@0 | 176 |
michael@0 | 177 | movdqa xmm5, XMMWORD [rsi] ; xmm5=Y(0123456789ABCDEF)
michael@0 | 178 |
michael@0 | 179 | pcmpeqw xmm4,xmm4 ; xmm4=all ones
michael@0 | 180 | psrlw xmm4,BYTE_BIT ; xmm4={0xFF 0x00 0xFF 0x00 ..}
michael@0 | 181 | pand xmm4,xmm5 ; xmm4=Y(02468ACE)=YE
michael@0 | 182 | psrlw xmm5,BYTE_BIT ; xmm5=Y(13579BDF)=YO
michael@0 | 183 |
michael@0 | 184 | paddw xmm0,xmm4 ; xmm0=((R-Y)E+YE)=RE=R(02468ACE)
michael@0 | 185 | paddw xmm1,xmm5 ; xmm1=((R-Y)O+YO)=RO=R(13579BDF)
michael@0 | 186 | packuswb xmm0,xmm0 ; xmm0=R(02468ACE********)
michael@0 | 187 | packuswb xmm1,xmm1 ; xmm1=R(13579BDF********)
michael@0 | 188 |
michael@0 | 189 | paddw xmm2,xmm4 ; xmm2=((G-Y)E+YE)=GE=G(02468ACE)
michael@0 | 190 | paddw xmm3,xmm5 ; xmm3=((G-Y)O+YO)=GO=G(13579BDF)
michael@0 | 191 | packuswb xmm2,xmm2 ; xmm2=G(02468ACE********)
michael@0 | 192 | packuswb xmm3,xmm3 ; xmm3=G(13579BDF********)
michael@0 | 193 |
michael@0 | 194 | paddw xmm4, XMMWORD [wk(0)] ; xmm4=(YE+(B-Y)E)=BE=B(02468ACE)
michael@0 | 195 | paddw xmm5, XMMWORD [wk(1)] ; xmm5=(YO+(B-Y)O)=BO=B(13579BDF)
michael@0 | 196 | packuswb xmm4,xmm4 ; xmm4=B(02468ACE********)
michael@0 | 197 | packuswb xmm5,xmm5 ; xmm5=B(13579BDF********)
michael@0 | 198 |
michael@0 | 199 | %if RGB_PIXELSIZE == 3 ; ---------------
michael@0 | 200 |
michael@0 | 201 | ; xmmA=(00 02 04 06 08 0A 0C 0E **), xmmB=(01 03 05 07 09 0B 0D 0F **)
michael@0 | 202 | ; xmmC=(10 12 14 16 18 1A 1C 1E **), xmmD=(11 13 15 17 19 1B 1D 1F **)
michael@0 | 203 | ; xmmE=(20 22 24 26 28 2A 2C 2E **), xmmF=(21 23 25 27 29 2B 2D 2F **)
michael@0 | 204 | ; xmmG=(** ** ** ** ** ** ** ** **), xmmH=(** ** ** ** ** ** ** ** **)
michael@0 | 205 |
michael@0 | 206 | punpcklbw xmmA,xmmC ; xmmA=(00 10 02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E)
michael@0 | 207 | punpcklbw xmmE,xmmB ; xmmE=(20 01 22 03 24 05 26 07 28 09 2A 0B 2C 0D 2E 0F)
michael@0 | 208 | punpcklbw xmmD,xmmF ; xmmD=(11 21 13 23 15 25 17 27 19 29 1B 2B 1D 2D 1F 2F)
michael@0 | 209 |
michael@0 | 210 | movdqa xmmG,xmmA
michael@0 | 211 | movdqa xmmH,xmmA
michael@0 | 212 | punpcklwd xmmA,xmmE ; xmmA=(00 10 20 01 02 12 22 03 04 14 24 05 06 16 26 07)
michael@0 | 213 | punpckhwd xmmG,xmmE ; xmmG=(08 18 28 09 0A 1A 2A 0B 0C 1C 2C 0D 0E 1E 2E 0F)
michael@0 | 214 |
michael@0 | 215 | psrldq xmmH,2 ; xmmH=(02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E -- --)
michael@0 | 216 | psrldq xmmE,2 ; xmmE=(22 03 24 05 26 07 28 09 2A 0B 2C 0D 2E 0F -- --)
michael@0 | 217 |
michael@0 | 218 | movdqa xmmC,xmmD
michael@0 | 219 | movdqa xmmB,xmmD
michael@0 | 220 | punpcklwd xmmD,xmmH ; xmmD=(11 21 02 12 13 23 04 14 15 25 06 16 17 27 08 18)
michael@0 | 221 | punpckhwd xmmC,xmmH ; xmmC=(19 29 0A 1A 1B 2B 0C 1C 1D 2D 0E 1E 1F 2F -- --)
michael@0 | 222 |
michael@0 | 223 | psrldq xmmB,2 ; xmmB=(13 23 15 25 17 27 19 29 1B 2B 1D 2D 1F 2F -- --)
michael@0 | 224 |
michael@0 | 225 | movdqa xmmF,xmmE
michael@0 | 226 | punpcklwd xmmE,xmmB ; xmmE=(22 03 13 23 24 05 15 25 26 07 17 27 28 09 19 29)
michael@0 | 227 | punpckhwd xmmF,xmmB ; xmmF=(2A 0B 1B 2B 2C 0D 1D 2D 2E 0F 1F 2F -- -- -- --)
michael@0 | 228 |
michael@0 | 229 | pshufd xmmH,xmmA,0x4E; xmmH=(04 14 24 05 06 16 26 07 00 10 20 01 02 12 22 03)
michael@0 | 230 | movdqa xmmB,xmmE
michael@0 | 231 | punpckldq xmmA,xmmD ; xmmA=(00 10 20 01 11 21 02 12 02 12 22 03 13 23 04 14)
michael@0 | 232 | punpckldq xmmE,xmmH ; xmmE=(22 03 13 23 04 14 24 05 24 05 15 25 06 16 26 07)
michael@0 | 233 | punpckhdq xmmD,xmmB ; xmmD=(15 25 06 16 26 07 17 27 17 27 08 18 28 09 19 29)
michael@0 | 234 |
michael@0 | 235 | pshufd xmmH,xmmG,0x4E; xmmH=(0C 1C 2C 0D 0E 1E 2E 0F 08 18 28 09 0A 1A 2A 0B)
michael@0 | 236 | movdqa xmmB,xmmF
michael@0 | 237 | punpckldq xmmG,xmmC ; xmmG=(08 18 28 09 19 29 0A 1A 0A 1A 2A 0B 1B 2B 0C 1C)
michael@0 | 238 | punpckldq xmmF,xmmH ; xmmF=(2A 0B 1B 2B 0C 1C 2C 0D 2C 0D 1D 2D 0E 1E 2E 0F)
michael@0 | 239 | punpckhdq xmmC,xmmB ; xmmC=(1D 2D 0E 1E 2E 0F 1F 2F 1F 2F -- -- -- -- -- --)
michael@0 | 240 |
michael@0 | 241 | punpcklqdq xmmA,xmmE ; xmmA=(00 10 20 01 11 21 02 12 22 03 13 23 04 14 24 05)
michael@0 | 242 | punpcklqdq xmmD,xmmG ; xmmD=(15 25 06 16 26 07 17 27 08 18 28 09 19 29 0A 1A)
michael@0 | 243 | punpcklqdq xmmF,xmmC ; xmmF=(2A 0B 1B 2B 0C 1C 2C 0D 1D 2D 0E 1E 2E 0F 1F 2F)
michael@0 | 244 |
michael@0 | 245 | cmp rcx, byte SIZEOF_XMMWORD ; at least a full 16-column chunk left?
michael@0 | 246 | jb short .column_st32
michael@0 | 247 |
michael@0 | 248 | test rdi, SIZEOF_XMMWORD-1 ; outptr 16-byte aligned?
michael@0 | 249 | jnz short .out1
michael@0 | 250 | ; --(aligned)-------------------
michael@0 | 251 | movntdq XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
michael@0 | 252 | movntdq XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
michael@0 | 253 | movntdq XMMWORD [rdi+2*SIZEOF_XMMWORD], xmmF
michael@0 | 254 | jmp short .out0
michael@0 | 255 | .out1: ; --(unaligned)-----------------
michael@0 | 256 | movdqu XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
michael@0 | 257 | movdqu XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
michael@0 | 258 | movdqu XMMWORD [rdi+2*SIZEOF_XMMWORD], xmmF
michael@0 | 259 | .out0:
michael@0 | 260 | add rdi, byte RGB_PIXELSIZE*SIZEOF_XMMWORD ; outptr
michael@0 | 261 | sub rcx, byte SIZEOF_XMMWORD
michael@0 | 262 | jz near .nextrow
michael@0 | 263 |
michael@0 | 264 | add rsi, byte SIZEOF_XMMWORD ; inptr0
michael@0 | 265 | add rbx, byte SIZEOF_XMMWORD ; inptr1
michael@0 | 266 | add rdx, byte SIZEOF_XMMWORD ; inptr2
michael@0 | 267 | jmp near .columnloop
michael@0 | 268 |
michael@0 | 269 | .column_st32:
michael@0 | 270 | lea rcx, [rcx+rcx*2] ; imul ecx, RGB_PIXELSIZE
michael@0 | 271 | cmp rcx, byte 2*SIZEOF_XMMWORD
michael@0 | 272 | jb short .column_st16
michael@0 | 273 | movdqu XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
michael@0 | 274 | movdqu XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
michael@0 | 275 | add rdi, byte 2*SIZEOF_XMMWORD ; outptr
michael@0 | 276 | movdqa xmmA,xmmF
michael@0 | 277 | sub rcx, byte 2*SIZEOF_XMMWORD
michael@0 | 278 | jmp short .column_st15
michael@0 | 279 | .column_st16:
michael@0 | 280 | cmp rcx, byte SIZEOF_XMMWORD
michael@0 | 281 | jb short .column_st15
michael@0 | 282 | movdqu XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
michael@0 | 283 | add rdi, byte SIZEOF_XMMWORD ; outptr
michael@0 | 284 | movdqa xmmA,xmmD
michael@0 | 285 | sub rcx, byte SIZEOF_XMMWORD
michael@0 | 286 | .column_st15:
michael@0 | 287 | ; Store the lower 8 bytes of xmmA to the output when it has enough
michael@0 | 288 | ; space.
michael@0 | 289 | cmp rcx, byte SIZEOF_MMWORD
michael@0 | 290 | jb short .column_st7
michael@0 | 291 | movq XMM_MMWORD [rdi], xmmA
michael@0 | 292 | add rdi, byte SIZEOF_MMWORD
michael@0 | 293 | sub rcx, byte SIZEOF_MMWORD
michael@0 | 294 | psrldq xmmA, SIZEOF_MMWORD ; shift out the bytes just stored
michael@0 | 295 | .column_st7:
michael@0 | 296 | ; Store the lower 4 bytes of xmmA to the output when it has enough
michael@0 | 297 | ; space.
michael@0 | 298 | cmp rcx, byte SIZEOF_DWORD
michael@0 | 299 | jb short .column_st3
michael@0 | 300 | movd XMM_DWORD [rdi], xmmA
michael@0 | 301 | add rdi, byte SIZEOF_DWORD
michael@0 | 302 | sub rcx, byte SIZEOF_DWORD
michael@0 | 303 | psrldq xmmA, SIZEOF_DWORD
michael@0 | 304 | .column_st3:
michael@0 | 305 | ; Store the lower 2 bytes of rax to the output when it has enough
michael@0 | 306 | ; space.
michael@0 | 307 | movd eax, xmmA
michael@0 | 308 | cmp rcx, byte SIZEOF_WORD
michael@0 | 309 | jb short .column_st1
michael@0 | 310 | mov WORD [rdi], ax
michael@0 | 311 | add rdi, byte SIZEOF_WORD
michael@0 | 312 | sub rcx, byte SIZEOF_WORD
michael@0 | 313 | shr rax, 16
michael@0 | 314 | .column_st1:
michael@0 | 315 | ; Store the lower 1 byte of rax to the output when it has enough
michael@0 | 316 | ; space.
michael@0 | 317 | test rcx, rcx
michael@0 | 318 | jz short .nextrow
michael@0 | 319 | mov BYTE [rdi], al
michael@0 | 320 |
michael@0 | 321 | %else ; RGB_PIXELSIZE == 4 ; -----------
michael@0 | 322 |
michael@0 | 323 | %ifdef RGBX_FILLER_0XFF
michael@0 | 324 | pcmpeqb xmm6,xmm6 ; xmm6=XE=X(02468ACE********)
michael@0 | 325 | pcmpeqb xmm7,xmm7 ; xmm7=XO=X(13579BDF********)
michael@0 | 326 | %else
michael@0 | 327 | pxor xmm6,xmm6 ; xmm6=XE=X(02468ACE********)
michael@0 | 328 | pxor xmm7,xmm7 ; xmm7=XO=X(13579BDF********)
michael@0 | 329 | %endif
michael@0 | 330 | ; xmmA=(00 02 04 06 08 0A 0C 0E **), xmmB=(01 03 05 07 09 0B 0D 0F **)
michael@0 | 331 | ; xmmC=(10 12 14 16 18 1A 1C 1E **), xmmD=(11 13 15 17 19 1B 1D 1F **)
michael@0 | 332 | ; xmmE=(20 22 24 26 28 2A 2C 2E **), xmmF=(21 23 25 27 29 2B 2D 2F **)
michael@0 | 333 | ; xmmG=(30 32 34 36 38 3A 3C 3E **), xmmH=(31 33 35 37 39 3B 3D 3F **)
michael@0 | 334 |
michael@0 | 335 | punpcklbw xmmA,xmmC ; xmmA=(00 10 02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E)
michael@0 | 336 | punpcklbw xmmE,xmmG ; xmmE=(20 30 22 32 24 34 26 36 28 38 2A 3A 2C 3C 2E 3E)
michael@0 | 337 | punpcklbw xmmB,xmmD ; xmmB=(01 11 03 13 05 15 07 17 09 19 0B 1B 0D 1D 0F 1F)
michael@0 | 338 | punpcklbw xmmF,xmmH ; xmmF=(21 31 23 33 25 35 27 37 29 39 2B 3B 2D 3D 2F 3F)
michael@0 | 339 |
michael@0 | 340 | movdqa xmmC,xmmA
michael@0 | 341 | punpcklwd xmmA,xmmE ; xmmA=(00 10 20 30 02 12 22 32 04 14 24 34 06 16 26 36)
michael@0 | 342 | punpckhwd xmmC,xmmE ; xmmC=(08 18 28 38 0A 1A 2A 3A 0C 1C 2C 3C 0E 1E 2E 3E)
michael@0 | 343 | movdqa xmmG,xmmB
michael@0 | 344 | punpcklwd xmmB,xmmF ; xmmB=(01 11 21 31 03 13 23 33 05 15 25 35 07 17 27 37)
michael@0 | 345 | punpckhwd xmmG,xmmF ; xmmG=(09 19 29 39 0B 1B 2B 3B 0D 1D 2D 3D 0F 1F 2F 3F)
michael@0 | 346 |
michael@0 | 347 | movdqa xmmD,xmmA
michael@0 | 348 | punpckldq xmmA,xmmB ; xmmA=(00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33)
michael@0 | 349 | punpckhdq xmmD,xmmB ; xmmD=(04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37)
michael@0 | 350 | movdqa xmmH,xmmC
michael@0 | 351 | punpckldq xmmC,xmmG ; xmmC=(08 18 28 38 09 19 29 39 0A 1A 2A 3A 0B 1B 2B 3B)
michael@0 | 352 | punpckhdq xmmH,xmmG ; xmmH=(0C 1C 2C 3C 0D 1D 2D 3D 0E 1E 2E 3E 0F 1F 2F 3F)
michael@0 | 353 |
michael@0 | 354 | cmp rcx, byte SIZEOF_XMMWORD ; at least a full 16-column chunk left?
michael@0 | 355 | jb short .column_st32
michael@0 | 356 |
michael@0 | 357 | test rdi, SIZEOF_XMMWORD-1 ; outptr 16-byte aligned?
michael@0 | 358 | jnz short .out1
michael@0 | 359 | ; --(aligned)-------------------
michael@0 | 360 | movntdq XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
michael@0 | 361 | movntdq XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
michael@0 | 362 | movntdq XMMWORD [rdi+2*SIZEOF_XMMWORD], xmmC
michael@0 | 363 | movntdq XMMWORD [rdi+3*SIZEOF_XMMWORD], xmmH
michael@0 | 364 | jmp short .out0
michael@0 | 365 | .out1: ; --(unaligned)-----------------
michael@0 | 366 | movdqu XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
michael@0 | 367 | movdqu XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
michael@0 | 368 | movdqu XMMWORD [rdi+2*SIZEOF_XMMWORD], xmmC
michael@0 | 369 | movdqu XMMWORD [rdi+3*SIZEOF_XMMWORD], xmmH
michael@0 | 370 | .out0:
michael@0 | 371 | add rdi, byte RGB_PIXELSIZE*SIZEOF_XMMWORD ; outptr
michael@0 | 372 | sub rcx, byte SIZEOF_XMMWORD
michael@0 | 373 | jz near .nextrow
michael@0 | 374 |
michael@0 | 375 | add rsi, byte SIZEOF_XMMWORD ; inptr0
michael@0 | 376 | add rbx, byte SIZEOF_XMMWORD ; inptr1
michael@0 | 377 | add rdx, byte SIZEOF_XMMWORD ; inptr2
michael@0 | 378 | jmp near .columnloop
michael@0 | 379 |
michael@0 | 380 | .column_st32:
michael@0 | 381 | cmp rcx, byte SIZEOF_XMMWORD/2 ; 8 or more pixels left?
michael@0 | 382 | jb short .column_st16
michael@0 | 383 | movdqu XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
michael@0 | 384 | movdqu XMMWORD [rdi+1*SIZEOF_XMMWORD], xmmD
michael@0 | 385 | add rdi, byte 2*SIZEOF_XMMWORD ; outptr
michael@0 | 386 | movdqa xmmA,xmmC
michael@0 | 387 | movdqa xmmD,xmmH
michael@0 | 388 | sub rcx, byte SIZEOF_XMMWORD/2
michael@0 | 389 | .column_st16:
michael@0 | 390 | cmp rcx, byte SIZEOF_XMMWORD/4 ; 4 or more pixels left?
michael@0 | 391 | jb short .column_st15
michael@0 | 392 | movdqu XMMWORD [rdi+0*SIZEOF_XMMWORD], xmmA
michael@0 | 393 | add rdi, byte SIZEOF_XMMWORD ; outptr
michael@0 | 394 | movdqa xmmA,xmmD
michael@0 | 395 | sub rcx, byte SIZEOF_XMMWORD/4
michael@0 | 396 | .column_st15:
michael@0 | 397 | ; Store two pixels (8 bytes) of xmmA to the output when it has enough
michael@0 | 398 | ; space.
michael@0 | 399 | cmp rcx, byte SIZEOF_XMMWORD/8
michael@0 | 400 | jb short .column_st7
michael@0 | 401 | movq MMWORD [rdi], xmmA
michael@0 | 402 | add rdi, byte SIZEOF_XMMWORD/8*4
michael@0 | 403 | sub rcx, byte SIZEOF_XMMWORD/8
michael@0 | 404 | psrldq xmmA, SIZEOF_XMMWORD/8*4 ; shift out the two pixels just stored
michael@0 | 405 | .column_st7:
michael@0 | 406 | ; Store one pixel (4 bytes) of xmmA to the output when it has enough
michael@0 | 407 | ; space.
michael@0 | 408 | test rcx, rcx
michael@0 | 409 | jz short .nextrow
michael@0 | 410 | movd XMM_DWORD [rdi], xmmA
michael@0 | 411 |
michael@0 | 412 | %endif ; RGB_PIXELSIZE ; ---------------
michael@0 | 413 |
michael@0 | 414 | .nextrow:
michael@0 | 415 | pop rcx ; restore per-row state saved at .rowloop (reverse order)
michael@0 | 416 | pop rsi
michael@0 | 417 | pop rbx
michael@0 | 418 | pop rdx
michael@0 | 419 | pop rdi
michael@0 | 420 | pop rax
michael@0 | 421 |
michael@0 | 422 | add rsi, byte SIZEOF_JSAMPROW
michael@0 | 423 | add rbx, byte SIZEOF_JSAMPROW
michael@0 | 424 | add rdx, byte SIZEOF_JSAMPROW
michael@0 | 425 | add rdi, byte SIZEOF_JSAMPROW ; output_buf
michael@0 | 426 | dec rax ; num_rows
michael@0 | 427 | jg near .rowloop
michael@0 | 428 |
michael@0 | 429 | sfence ; flush the write buffer
michael@0 | 430 |
michael@0 | 431 | .return:
michael@0 | 432 | pop rbx
michael@0 | 433 | uncollect_args ; restore registers saved by collect_args
michael@0 | 434 | mov rsp,rbp ; rsp <- aligned rbp
michael@0 | 435 | pop rsp ; rsp <- original rbp
michael@0 | 436 | pop rbp
michael@0 | 437 | ret
michael@0 | 438 | |
michael@0 | 439 | ; For some reason, the OS X linker does not honor the request to align the |
michael@0 | 440 | ; segment unless we do this. |
michael@0 | 441 | align 16 |