Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f for hacking purposes.
;
; jdsamss2-64.asm - upsampling (64-bit SSE2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright 2009 D. R. Commander
;
; Based on
; x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler); it can *not*
; be assembled with Microsoft's MASM or any compatible assembler
; (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; [TAB8]

%include "jsimdext.inc"

; --------------------------------------------------------------------------
        SECTION SEG_CONST

        alignz  16
        global  EXTN(jconst_fancy_upsample_sse2)

EXTN(jconst_fancy_upsample_sse2):

PW_ONE          times 8 dw 1
PW_TWO          times 8 dw 2
PW_THREE        times 8 dw 3
PW_SEVEN        times 8 dw 7
PW_EIGHT        times 8 dw 8
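; (These word constants supply the 3x weights and the rounding terms used by
; the triangle filters below: +1/+2 before the >>2 in the h2v1 case, +7/+8
; before the >>4 in the h2v2 case.)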

        alignz  16

; --------------------------------------------------------------------------
        SECTION SEG_TEXT
        BITS    64
;
; Fancy processing for the common case of 2:1 horizontal and 1:1 vertical.
;
; The upsampling algorithm is linear interpolation between pixel centers,
; also known as a "triangle filter". This is a good compromise between
; speed and visual quality. The centers of the output pixels are 1/4 and 3/4
; of the way between input pixel centers.
;
; GLOBAL(void)
; jsimd_h2v1_fancy_upsample_sse2 (int max_v_samp_factor,
;                                 JDIMENSION downsampled_width,
;                                 JSAMPARRAY input_data,
;                                 JSAMPARRAY * output_data_ptr);
;
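; As a rough scalar sketch of what this routine computes (illustration only,
; not part of the build; the helper name and signature below are made up,
; and JSAMPLE is libjpeg's 8-bit sample type): each input pixel contributes
; with weight 3 to its two nearest output pixels and weight 1 to the next
; ones, with the row edges replicated.
;
;   static void h2v1_fancy_row (const JSAMPLE *in, JSAMPLE *out, long w)
;   {
;     for (long i = 0; i < w; i++) {
;       int prev = in[i > 0     ? i - 1 : 0];      /* replicate left edge  */
;       int next = in[i < w - 1 ? i + 1 : w - 1];  /* replicate right edge */
;       out[2*i]   = (JSAMPLE) ((3 * in[i] + prev + 1) >> 2);
;       out[2*i+1] = (JSAMPLE) ((3 * in[i] + next + 2) >> 2);
;     }
;   }
;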

; r10 = int max_v_samp_factor
; r11 = JDIMENSION downsampled_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY * output_data_ptr
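;
; (The r10-r13 assignments above are established by the collect_args macro
; from jsimdext.inc, which is expected to copy the incoming C argument
; registers into r10-r13 so the same body serves both the SysV and Win64
; calling conventions.)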

        align   16
        global  EXTN(jsimd_h2v1_fancy_upsample_sse2)

EXTN(jsimd_h2v1_fancy_upsample_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
        collect_args

        mov     rax, r11                        ; colctr
        test    rax,rax
        jz      near .return

        mov     rcx, r10                        ; rowctr
        test    rcx,rcx
        jz      near .return

        mov     rsi, r12                        ; input_data
        mov     rdi, r13
        mov     rdi, JSAMPARRAY [rdi]           ; output_data
.rowloop:
        push    rax                             ; colctr
        push    rdi
        push    rsi

        mov     rsi, JSAMPROW [rsi]             ; inptr
        mov     rdi, JSAMPROW [rdi]             ; outptr

        test    rax, SIZEOF_XMMWORD-1
        jz      short .skip
        mov     dl, JSAMPLE [rsi+(rax-1)*SIZEOF_JSAMPLE]
        mov     JSAMPLE [rsi+rax*SIZEOF_JSAMPLE], dl    ; insert a dummy sample
.skip:
        pxor    xmm0,xmm0                       ; xmm0=(all 0's)
        pcmpeqb xmm7,xmm7
        psrldq  xmm7,(SIZEOF_XMMWORD-1)
        pand    xmm7, XMMWORD [rsi+0*SIZEOF_XMMWORD]

        add     rax, byte SIZEOF_XMMWORD-1
        and     rax, byte -SIZEOF_XMMWORD
        cmp     rax, byte SIZEOF_XMMWORD
        ja      short .columnloop

.columnloop_last:
        pcmpeqb xmm6,xmm6
        pslldq  xmm6,(SIZEOF_XMMWORD-1)
        pand    xmm6, XMMWORD [rsi+0*SIZEOF_XMMWORD]
        jmp     short .upsample

.columnloop:
        movdqa  xmm6, XMMWORD [rsi+1*SIZEOF_XMMWORD]
        pslldq  xmm6,(SIZEOF_XMMWORD-1)

.upsample:
        movdqa  xmm1, XMMWORD [rsi+0*SIZEOF_XMMWORD]
        movdqa  xmm2,xmm1
        movdqa  xmm3,xmm1                       ; xmm1=( 0 1 2 ... 13 14 15)
        pslldq  xmm2,1                          ; xmm2=(-- 0 1 ... 12 13 14)
        psrldq  xmm3,1                          ; xmm3=( 1 2 3 ... 14 15 --)

        por     xmm2,xmm7                       ; xmm2=(-1 0 1 ... 12 13 14)
        por     xmm3,xmm6                       ; xmm3=( 1 2 3 ... 14 15 16)

        movdqa  xmm7,xmm1
        psrldq  xmm7,(SIZEOF_XMMWORD-1)         ; xmm7=(15 -- -- ... -- -- --)

        movdqa  xmm4,xmm1
        punpcklbw xmm1,xmm0                     ; xmm1=( 0 1 2 3 4 5 6 7)
        punpckhbw xmm4,xmm0                     ; xmm4=( 8 9 10 11 12 13 14 15)
        movdqa  xmm5,xmm2
        punpcklbw xmm2,xmm0                     ; xmm2=(-1 0 1 2 3 4 5 6)
        punpckhbw xmm5,xmm0                     ; xmm5=( 7 8 9 10 11 12 13 14)
        movdqa  xmm6,xmm3
        punpcklbw xmm3,xmm0                     ; xmm3=( 1 2 3 4 5 6 7 8)
        punpckhbw xmm6,xmm0                     ; xmm6=( 9 10 11 12 13 14 15 16)

        pmullw  xmm1,[rel PW_THREE]
        pmullw  xmm4,[rel PW_THREE]
        paddw   xmm2,[rel PW_ONE]
        paddw   xmm5,[rel PW_ONE]
        paddw   xmm3,[rel PW_TWO]
        paddw   xmm6,[rel PW_TWO]

        paddw   xmm2,xmm1
        paddw   xmm5,xmm4
        psrlw   xmm2,2                          ; xmm2=OutLE=( 0 2 4 6 8 10 12 14)
        psrlw   xmm5,2                          ; xmm5=OutHE=(16 18 20 22 24 26 28 30)
        paddw   xmm3,xmm1
        paddw   xmm6,xmm4
        psrlw   xmm3,2                          ; xmm3=OutLO=( 1 3 5 7 9 11 13 15)
        psrlw   xmm6,2                          ; xmm6=OutHO=(17 19 21 23 25 27 29 31)

        psllw   xmm3,BYTE_BIT
        psllw   xmm6,BYTE_BIT
        por     xmm2,xmm3                       ; xmm2=OutL=( 0 1 2 ... 13 14 15)
        por     xmm5,xmm6                       ; xmm5=OutH=(16 17 18 ... 29 30 31)

        movdqa  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm5

        sub     rax, byte SIZEOF_XMMWORD
        add     rsi, byte 1*SIZEOF_XMMWORD      ; inptr
        add     rdi, byte 2*SIZEOF_XMMWORD      ; outptr
        cmp     rax, byte SIZEOF_XMMWORD
        ja      near .columnloop
        test    eax,eax
        jnz     near .columnloop_last

        pop     rsi
        pop     rdi
        pop     rax

        add     rsi, byte SIZEOF_JSAMPROW       ; input_data
        add     rdi, byte SIZEOF_JSAMPROW       ; output_data
        dec     rcx                             ; rowctr
        jg      near .rowloop

.return:
        uncollect_args
        pop     rbp
        ret

; --------------------------------------------------------------------------
;
; Fancy processing for the common case of 2:1 horizontal and 2:1 vertical.
; Again a triangle filter; see comments for h2v1 case, above.
;
; GLOBAL(void)
; jsimd_h2v2_fancy_upsample_sse2 (int max_v_samp_factor,
;                                 JDIMENSION downsampled_width,
;                                 JSAMPARRAY input_data,
;                                 JSAMPARRAY * output_data_ptr);
;
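; A rough scalar sketch of the two passes below (illustration only, not part
; of the build; v[], this_row and other_row are made-up names).  For each
; output row the code first forms vertically filtered column sums, parking
; them temporarily in the output buffer, then runs a horizontal pass over
; those sums with edge replication:
;
;   v[i]       = 3 * this_row[i] + other_row[i];   /* nearer row weighted 3x */
;   out[2*i]   = (JSAMPLE) ((3 * v[i] + v[i-1] + 8) >> 4);
;   out[2*i+1] = (JSAMPLE) ((3 * v[i] + v[i+1] + 7) >> 4);
;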

; r10 = int max_v_samp_factor
; r11 = JDIMENSION downsampled_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY * output_data_ptr

%define wk(i)   rbp-(WK_NUM-(i))*SIZEOF_XMMWORD ; xmmword wk[WK_NUM]
%define WK_NUM  4
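; (wk(0)..wk(3) are 16-byte-aligned scratch slots in the stack frame set up
; below; they carry the boundary column sums of each column block, for the
; upper and lower output rows, into the next .columnloop iteration.)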

        align   16
        global  EXTN(jsimd_h2v2_fancy_upsample_sse2)

EXTN(jsimd_h2v2_fancy_upsample_sse2):
        push    rbp
        mov     rax,rsp                         ; rax = original rbp
        sub     rsp, byte 4
        and     rsp, byte (-SIZEOF_XMMWORD)     ; align to 128 bits
        mov     [rsp],rax
        mov     rbp,rsp                         ; rbp = aligned rbp
        lea     rsp, [wk(0)]
        collect_args
        push    rbx

        mov     rax, r11                        ; colctr
        test    rax,rax
        jz      near .return

        mov     rcx, r10                        ; rowctr
        test    rcx,rcx
        jz      near .return

        mov     rsi, r12                        ; input_data
        mov     rdi, r13
        mov     rdi, JSAMPARRAY [rdi]           ; output_data
.rowloop:
        push    rax                             ; colctr
        push    rcx
        push    rdi
        push    rsi

        mov     rcx, JSAMPROW [rsi-1*SIZEOF_JSAMPROW]   ; inptr1(above)
        mov     rbx, JSAMPROW [rsi+0*SIZEOF_JSAMPROW]   ; inptr0
        mov     rsi, JSAMPROW [rsi+1*SIZEOF_JSAMPROW]   ; inptr1(below)
        mov     rdx, JSAMPROW [rdi+0*SIZEOF_JSAMPROW]   ; outptr0
        mov     rdi, JSAMPROW [rdi+1*SIZEOF_JSAMPROW]   ; outptr1

        test    rax, SIZEOF_XMMWORD-1
        jz      short .skip
        push    rdx
        mov     dl, JSAMPLE [rcx+(rax-1)*SIZEOF_JSAMPLE]
        mov     JSAMPLE [rcx+rax*SIZEOF_JSAMPLE], dl
        mov     dl, JSAMPLE [rbx+(rax-1)*SIZEOF_JSAMPLE]
        mov     JSAMPLE [rbx+rax*SIZEOF_JSAMPLE], dl
        mov     dl, JSAMPLE [rsi+(rax-1)*SIZEOF_JSAMPLE]
        mov     JSAMPLE [rsi+rax*SIZEOF_JSAMPLE], dl    ; insert a dummy sample
        pop     rdx
.skip:
        ; -- process the first column block

        movdqa  xmm0, XMMWORD [rbx+0*SIZEOF_XMMWORD]    ; xmm0=row[ 0][0]
        movdqa  xmm1, XMMWORD [rcx+0*SIZEOF_XMMWORD]    ; xmm1=row[-1][0]
        movdqa  xmm2, XMMWORD [rsi+0*SIZEOF_XMMWORD]    ; xmm2=row[+1][0]

        pxor    xmm3,xmm3                       ; xmm3=(all 0's)
        movdqa  xmm4,xmm0
        punpcklbw xmm0,xmm3                     ; xmm0=row[ 0]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm4,xmm3                     ; xmm4=row[ 0]( 8 9 10 11 12 13 14 15)
        movdqa  xmm5,xmm1
        punpcklbw xmm1,xmm3                     ; xmm1=row[-1]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm5,xmm3                     ; xmm5=row[-1]( 8 9 10 11 12 13 14 15)
        movdqa  xmm6,xmm2
        punpcklbw xmm2,xmm3                     ; xmm2=row[+1]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm6,xmm3                     ; xmm6=row[+1]( 8 9 10 11 12 13 14 15)

        pmullw  xmm0,[rel PW_THREE]
        pmullw  xmm4,[rel PW_THREE]

        pcmpeqb xmm7,xmm7
        psrldq  xmm7,(SIZEOF_XMMWORD-2)

        paddw   xmm1,xmm0                       ; xmm1=Int0L=( 0 1 2 3 4 5 6 7)
        paddw   xmm5,xmm4                       ; xmm5=Int0H=( 8 9 10 11 12 13 14 15)
        paddw   xmm2,xmm0                       ; xmm2=Int1L=( 0 1 2 3 4 5 6 7)
        paddw   xmm6,xmm4                       ; xmm6=Int1H=( 8 9 10 11 12 13 14 15)

        movdqa  XMMWORD [rdx+0*SIZEOF_XMMWORD], xmm1    ; temporarily save
        movdqa  XMMWORD [rdx+1*SIZEOF_XMMWORD], xmm5    ; the intermediate data
        movdqa  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm6

        pand    xmm1,xmm7                       ; xmm1=( 0 -- -- -- -- -- -- --)
        pand    xmm2,xmm7                       ; xmm2=( 0 -- -- -- -- -- -- --)

        movdqa  XMMWORD [wk(0)], xmm1
        movdqa  XMMWORD [wk(1)], xmm2

        add     rax, byte SIZEOF_XMMWORD-1
        and     rax, byte -SIZEOF_XMMWORD
        cmp     rax, byte SIZEOF_XMMWORD
        ja      short .columnloop

.columnloop_last:
        ; -- process the last column block

        pcmpeqb xmm1,xmm1
        pslldq  xmm1,(SIZEOF_XMMWORD-2)
        movdqa  xmm2,xmm1

        pand    xmm1, XMMWORD [rdx+1*SIZEOF_XMMWORD]
        pand    xmm2, XMMWORD [rdi+1*SIZEOF_XMMWORD]

        movdqa  XMMWORD [wk(2)], xmm1           ; xmm1=(-- -- -- -- -- -- -- 15)
        movdqa  XMMWORD [wk(3)], xmm2           ; xmm2=(-- -- -- -- -- -- -- 15)

        jmp     near .upsample

.columnloop:
        ; -- process the next column block

        movdqa  xmm0, XMMWORD [rbx+1*SIZEOF_XMMWORD]    ; xmm0=row[ 0][1]
        movdqa  xmm1, XMMWORD [rcx+1*SIZEOF_XMMWORD]    ; xmm1=row[-1][1]
        movdqa  xmm2, XMMWORD [rsi+1*SIZEOF_XMMWORD]    ; xmm2=row[+1][1]

        pxor    xmm3,xmm3                       ; xmm3=(all 0's)
        movdqa  xmm4,xmm0
        punpcklbw xmm0,xmm3                     ; xmm0=row[ 0]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm4,xmm3                     ; xmm4=row[ 0]( 8 9 10 11 12 13 14 15)
        movdqa  xmm5,xmm1
        punpcklbw xmm1,xmm3                     ; xmm1=row[-1]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm5,xmm3                     ; xmm5=row[-1]( 8 9 10 11 12 13 14 15)
        movdqa  xmm6,xmm2
        punpcklbw xmm2,xmm3                     ; xmm2=row[+1]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm6,xmm3                     ; xmm6=row[+1]( 8 9 10 11 12 13 14 15)

        pmullw  xmm0,[rel PW_THREE]
        pmullw  xmm4,[rel PW_THREE]

        paddw   xmm1,xmm0                       ; xmm1=Int0L=( 0 1 2 3 4 5 6 7)
        paddw   xmm5,xmm4                       ; xmm5=Int0H=( 8 9 10 11 12 13 14 15)
        paddw   xmm2,xmm0                       ; xmm2=Int1L=( 0 1 2 3 4 5 6 7)
        paddw   xmm6,xmm4                       ; xmm6=Int1H=( 8 9 10 11 12 13 14 15)

        movdqa  XMMWORD [rdx+2*SIZEOF_XMMWORD], xmm1    ; temporarily save
        movdqa  XMMWORD [rdx+3*SIZEOF_XMMWORD], xmm5    ; the intermediate data
        movdqa  XMMWORD [rdi+2*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rdi+3*SIZEOF_XMMWORD], xmm6

        pslldq  xmm1,(SIZEOF_XMMWORD-2)         ; xmm1=(-- -- -- -- -- -- -- 0)
        pslldq  xmm2,(SIZEOF_XMMWORD-2)         ; xmm2=(-- -- -- -- -- -- -- 0)

        movdqa  XMMWORD [wk(2)], xmm1
        movdqa  XMMWORD [wk(3)], xmm2

.upsample:
        ; -- process the upper row

        movdqa  xmm7, XMMWORD [rdx+0*SIZEOF_XMMWORD]
        movdqa  xmm3, XMMWORD [rdx+1*SIZEOF_XMMWORD]

        movdqa  xmm0,xmm7                       ; xmm7=Int0L=( 0 1 2 3 4 5 6 7)
        movdqa  xmm4,xmm3                       ; xmm3=Int0H=( 8 9 10 11 12 13 14 15)
        psrldq  xmm0,2                          ; xmm0=( 1 2 3 4 5 6 7 --)
        pslldq  xmm4,(SIZEOF_XMMWORD-2)         ; xmm4=(-- -- -- -- -- -- -- 8)
        movdqa  xmm5,xmm7
        movdqa  xmm6,xmm3
        psrldq  xmm5,(SIZEOF_XMMWORD-2)         ; xmm5=( 7 -- -- -- -- -- -- --)
        pslldq  xmm6,2                          ; xmm6=(-- 8 9 10 11 12 13 14)

        por     xmm0,xmm4                       ; xmm0=( 1 2 3 4 5 6 7 8)
        por     xmm5,xmm6                       ; xmm5=( 7 8 9 10 11 12 13 14)

        movdqa  xmm1,xmm7
        movdqa  xmm2,xmm3
        pslldq  xmm1,2                          ; xmm1=(-- 0 1 2 3 4 5 6)
        psrldq  xmm2,2                          ; xmm2=( 9 10 11 12 13 14 15 --)
        movdqa  xmm4,xmm3
        psrldq  xmm4,(SIZEOF_XMMWORD-2)         ; xmm4=(15 -- -- -- -- -- -- --)

        por     xmm1, XMMWORD [wk(0)]           ; xmm1=(-1 0 1 2 3 4 5 6)
        por     xmm2, XMMWORD [wk(2)]           ; xmm2=( 9 10 11 12 13 14 15 16)

        movdqa  XMMWORD [wk(0)], xmm4

        pmullw  xmm7,[rel PW_THREE]
        pmullw  xmm3,[rel PW_THREE]
        paddw   xmm1,[rel PW_EIGHT]
        paddw   xmm5,[rel PW_EIGHT]
        paddw   xmm0,[rel PW_SEVEN]
        paddw   xmm2,[rel PW_SEVEN]

        paddw   xmm1,xmm7
        paddw   xmm5,xmm3
        psrlw   xmm1,4                          ; xmm1=Out0LE=( 0 2 4 6 8 10 12 14)
        psrlw   xmm5,4                          ; xmm5=Out0HE=(16 18 20 22 24 26 28 30)
        paddw   xmm0,xmm7
        paddw   xmm2,xmm3
        psrlw   xmm0,4                          ; xmm0=Out0LO=( 1 3 5 7 9 11 13 15)
        psrlw   xmm2,4                          ; xmm2=Out0HO=(17 19 21 23 25 27 29 31)

        psllw   xmm0,BYTE_BIT
        psllw   xmm2,BYTE_BIT
        por     xmm1,xmm0                       ; xmm1=Out0L=( 0 1 2 ... 13 14 15)
        por     xmm5,xmm2                       ; xmm5=Out0H=(16 17 18 ... 29 30 31)

        movdqa  XMMWORD [rdx+0*SIZEOF_XMMWORD], xmm1
        movdqa  XMMWORD [rdx+1*SIZEOF_XMMWORD], xmm5

        ; -- process the lower row

        movdqa  xmm6, XMMWORD [rdi+0*SIZEOF_XMMWORD]
        movdqa  xmm4, XMMWORD [rdi+1*SIZEOF_XMMWORD]

        movdqa  xmm7,xmm6                       ; xmm6=Int1L=( 0 1 2 3 4 5 6 7)
        movdqa  xmm3,xmm4                       ; xmm4=Int1H=( 8 9 10 11 12 13 14 15)
        psrldq  xmm7,2                          ; xmm7=( 1 2 3 4 5 6 7 --)
        pslldq  xmm3,(SIZEOF_XMMWORD-2)         ; xmm3=(-- -- -- -- -- -- -- 8)
        movdqa  xmm0,xmm6
        movdqa  xmm2,xmm4
        psrldq  xmm0,(SIZEOF_XMMWORD-2)         ; xmm0=( 7 -- -- -- -- -- -- --)
        pslldq  xmm2,2                          ; xmm2=(-- 8 9 10 11 12 13 14)

        por     xmm7,xmm3                       ; xmm7=( 1 2 3 4 5 6 7 8)
        por     xmm0,xmm2                       ; xmm0=( 7 8 9 10 11 12 13 14)

        movdqa  xmm1,xmm6
        movdqa  xmm5,xmm4
        pslldq  xmm1,2                          ; xmm1=(-- 0 1 2 3 4 5 6)
        psrldq  xmm5,2                          ; xmm5=( 9 10 11 12 13 14 15 --)
        movdqa  xmm3,xmm4
        psrldq  xmm3,(SIZEOF_XMMWORD-2)         ; xmm3=(15 -- -- -- -- -- -- --)

        por     xmm1, XMMWORD [wk(1)]           ; xmm1=(-1 0 1 2 3 4 5 6)
        por     xmm5, XMMWORD [wk(3)]           ; xmm5=( 9 10 11 12 13 14 15 16)

        movdqa  XMMWORD [wk(1)], xmm3

        pmullw  xmm6,[rel PW_THREE]
        pmullw  xmm4,[rel PW_THREE]
        paddw   xmm1,[rel PW_EIGHT]
        paddw   xmm0,[rel PW_EIGHT]
        paddw   xmm7,[rel PW_SEVEN]
        paddw   xmm5,[rel PW_SEVEN]

        paddw   xmm1,xmm6
        paddw   xmm0,xmm4
        psrlw   xmm1,4                          ; xmm1=Out1LE=( 0 2 4 6 8 10 12 14)
        psrlw   xmm0,4                          ; xmm0=Out1HE=(16 18 20 22 24 26 28 30)
        paddw   xmm7,xmm6
        paddw   xmm5,xmm4
        psrlw   xmm7,4                          ; xmm7=Out1LO=( 1 3 5 7 9 11 13 15)
        psrlw   xmm5,4                          ; xmm5=Out1HO=(17 19 21 23 25 27 29 31)

        psllw   xmm7,BYTE_BIT
        psllw   xmm5,BYTE_BIT
        por     xmm1,xmm7                       ; xmm1=Out1L=( 0 1 2 ... 13 14 15)
        por     xmm0,xmm5                       ; xmm0=Out1H=(16 17 18 ... 29 30 31)

        movdqa  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm1
        movdqa  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm0

        sub     rax, byte SIZEOF_XMMWORD
        add     rcx, byte 1*SIZEOF_XMMWORD      ; inptr1(above)
        add     rbx, byte 1*SIZEOF_XMMWORD      ; inptr0
        add     rsi, byte 1*SIZEOF_XMMWORD      ; inptr1(below)
        add     rdx, byte 2*SIZEOF_XMMWORD      ; outptr0
        add     rdi, byte 2*SIZEOF_XMMWORD      ; outptr1
        cmp     rax, byte SIZEOF_XMMWORD
        ja      near .columnloop
        test    rax,rax
        jnz     near .columnloop_last

        pop     rsi
        pop     rdi
        pop     rcx
        pop     rax

        add     rsi, byte 1*SIZEOF_JSAMPROW     ; input_data
        add     rdi, byte 2*SIZEOF_JSAMPROW     ; output_data
        sub     rcx, byte 2                     ; rowctr
        jg      near .rowloop

.return:
        pop     rbx
        uncollect_args
        mov     rsp,rbp                         ; rsp <- aligned rbp
        pop     rsp                             ; rsp <- original rbp
        pop     rbp
        ret

; --------------------------------------------------------------------------
;
; Fast processing for the common case of 2:1 horizontal and 1:1 vertical.
; It's still a box filter.
;
; GLOBAL(void)
; jsimd_h2v1_upsample_sse2 (int max_v_samp_factor,
;                           JDIMENSION output_width,
;                           JSAMPARRAY input_data,
;                           JSAMPARRAY * output_data_ptr);
;
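; In scalar terms this is plain pixel replication (sketch only, not part of
; the build):
;
;   out[2*i] = out[2*i+1] = in[i];
;
; which the code below gets from punpcklbw/punpckhbw of a register with
; itself.
;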

; r10 = int max_v_samp_factor
; r11 = JDIMENSION output_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY * output_data_ptr

        align   16
        global  EXTN(jsimd_h2v1_upsample_sse2)

EXTN(jsimd_h2v1_upsample_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
        collect_args

        mov     rdx, r11
        add     rdx, byte (2*SIZEOF_XMMWORD)-1
        and     rdx, byte -(2*SIZEOF_XMMWORD)
        jz      near .return

        mov     rcx, r10                        ; rowctr
        test    rcx,rcx
        jz      short .return

        mov     rsi, r12                        ; input_data
        mov     rdi, r13
        mov     rdi, JSAMPARRAY [rdi]           ; output_data
.rowloop:
        push    rdi
        push    rsi

        mov     rsi, JSAMPROW [rsi]             ; inptr
        mov     rdi, JSAMPROW [rdi]             ; outptr
        mov     rax,rdx                         ; colctr
.columnloop:

        movdqa  xmm0, XMMWORD [rsi+0*SIZEOF_XMMWORD]

        movdqa  xmm1,xmm0
        punpcklbw xmm0,xmm0
        punpckhbw xmm1,xmm1

        movdqa  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm0
        movdqa  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm1

        sub     rax, byte 2*SIZEOF_XMMWORD
        jz      short .nextrow

        movdqa  xmm2, XMMWORD [rsi+1*SIZEOF_XMMWORD]

        movdqa  xmm3,xmm2
        punpcklbw xmm2,xmm2
        punpckhbw xmm3,xmm3

        movdqa  XMMWORD [rdi+2*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rdi+3*SIZEOF_XMMWORD], xmm3

        sub     rax, byte 2*SIZEOF_XMMWORD
        jz      short .nextrow

        add     rsi, byte 2*SIZEOF_XMMWORD      ; inptr
        add     rdi, byte 4*SIZEOF_XMMWORD      ; outptr
        jmp     short .columnloop

.nextrow:
        pop     rsi
        pop     rdi

        add     rsi, byte SIZEOF_JSAMPROW       ; input_data
        add     rdi, byte SIZEOF_JSAMPROW       ; output_data
        dec     rcx                             ; rowctr
        jg      short .rowloop

.return:
        uncollect_args
        pop     rbp
        ret

; --------------------------------------------------------------------------
;
; Fast processing for the common case of 2:1 horizontal and 2:1 vertical.
; It's still a box filter.
;
; GLOBAL(void)
; jsimd_h2v2_upsample_sse2 (int max_v_samp_factor,
;                           JDIMENSION output_width,
;                           JSAMPARRAY input_data,
;                           JSAMPARRAY * output_data_ptr);
;
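; (As in the h2v1 case above, this is pixel replication; the only difference
; is that the same doubled row is stored to both output rows, so each input
; pixel becomes a 2x2 block in the output.)
;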

; r10 = int max_v_samp_factor
; r11 = JDIMENSION output_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY * output_data_ptr

        align   16
        global  EXTN(jsimd_h2v2_upsample_sse2)

EXTN(jsimd_h2v2_upsample_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
        collect_args
        push    rbx

        mov     rdx, r11
        add     rdx, byte (2*SIZEOF_XMMWORD)-1
        and     rdx, byte -(2*SIZEOF_XMMWORD)
        jz      near .return

        mov     rcx, r10                        ; rowctr
        test    rcx,rcx
        jz      near .return

        mov     rsi, r12                        ; input_data
        mov     rdi, r13
        mov     rdi, JSAMPARRAY [rdi]           ; output_data
.rowloop:
        push    rdi
        push    rsi

        mov     rsi, JSAMPROW [rsi]             ; inptr
        mov     rbx, JSAMPROW [rdi+0*SIZEOF_JSAMPROW]   ; outptr0
        mov     rdi, JSAMPROW [rdi+1*SIZEOF_JSAMPROW]   ; outptr1
        mov     rax,rdx                         ; colctr
.columnloop:

        movdqa  xmm0, XMMWORD [rsi+0*SIZEOF_XMMWORD]

        movdqa  xmm1,xmm0
        punpcklbw xmm0,xmm0
        punpckhbw xmm1,xmm1

        movdqa  XMMWORD [rbx+0*SIZEOF_XMMWORD], xmm0
        movdqa  XMMWORD [rbx+1*SIZEOF_XMMWORD], xmm1
        movdqa  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm0
        movdqa  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm1

        sub     rax, byte 2*SIZEOF_XMMWORD
        jz      short .nextrow

        movdqa  xmm2, XMMWORD [rsi+1*SIZEOF_XMMWORD]

        movdqa  xmm3,xmm2
        punpcklbw xmm2,xmm2
        punpckhbw xmm3,xmm3

        movdqa  XMMWORD [rbx+2*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rbx+3*SIZEOF_XMMWORD], xmm3
        movdqa  XMMWORD [rdi+2*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rdi+3*SIZEOF_XMMWORD], xmm3

        sub     rax, byte 2*SIZEOF_XMMWORD
        jz      short .nextrow

        add     rsi, byte 2*SIZEOF_XMMWORD      ; inptr
        add     rbx, byte 4*SIZEOF_XMMWORD      ; outptr0
        add     rdi, byte 4*SIZEOF_XMMWORD      ; outptr1
        jmp     short .columnloop

.nextrow:
        pop     rsi
        pop     rdi

        add     rsi, byte 1*SIZEOF_JSAMPROW     ; input_data
        add     rdi, byte 2*SIZEOF_JSAMPROW     ; output_data
        sub     rcx, byte 2                     ; rowctr
        jg      near .rowloop

.return:
        pop     rbx
        uncollect_args
        pop     rbp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align   16