;
; jdsamss2-64.asm - upsampling (64-bit SSE2)
;
; Copyright 2009 Pierre Ossman for Cendio AB
; Copyright 2009 D. R. Commander
;
; Based on
; x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler),
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; [TAB8]

%include "jsimdext.inc"

; --------------------------------------------------------------------------
        SECTION SEG_CONST

        alignz  16
        global  EXTN(jconst_fancy_upsample_sse2)

EXTN(jconst_fancy_upsample_sse2):

; Packed-word constants used by the triangle-filter arithmetic below.
; Each is broadcast across all 8 words of an XMM register.
PW_ONE          times 8 dw 1    ; rounding term for even output columns (h2v1)
PW_TWO          times 8 dw 2    ; rounding term for odd output columns (h2v1)
PW_THREE        times 8 dw 3    ; 3:1 weight of the nearer input sample
PW_SEVEN        times 8 dw 7    ; rounding term for odd output columns (h2v2)
PW_EIGHT        times 8 dw 8    ; rounding term for even output columns (h2v2)

        alignz  16

; --------------------------------------------------------------------------
        SECTION SEG_TEXT
        BITS    64
;
; Fancy processing for the common case of 2:1 horizontal and 1:1 vertical.
;
; The upsampling algorithm is linear interpolation between pixel centers,
; also known as a "triangle filter".  This is a good compromise between
; speed and visual quality.  The centers of the output pixels are 1/4 and 3/4
; of the way between input pixel centers.
;
; GLOBAL(void)
; jsimd_h2v1_fancy_upsample_sse2 (int max_v_samp_factor,
;                                 JDIMENSION downsampled_width,
;                                 JSAMPARRAY input_data,
;                                 JSAMPARRAY * output_data_ptr);
;
; Doubles each input row horizontally: output col 2k   = (3*in[k] + in[k-1] + 1) >> 2,
;                                      output col 2k+1 = (3*in[k] + in[k+1] + 2) >> 2
; (the +1/+2 terms produce the two rounding phases of the triangle filter).

; Argument registers after collect_args (collect_args/uncollect_args are
; macros from jsimdext.inc -- presumably they marshal the platform ABI's
; argument registers into r10-r13; confirm against jsimdext.inc):
; r10 = int max_v_samp_factor
; r11 = JDIMENSION downsampled_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY * output_data_ptr

        align   16
        global  EXTN(jsimd_h2v1_fancy_upsample_sse2)

EXTN(jsimd_h2v1_fancy_upsample_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
        collect_args

        mov     rax, r11                ; colctr = downsampled_width
        test    rax,rax
        jz      near .return            ; nothing to do for zero width

        mov     rcx, r10                ; rowctr = max_v_samp_factor
        test    rcx,rcx
        jz      near .return            ; nothing to do for zero rows

        mov     rsi, r12                ; input_data
        mov     rdi, r13
        mov     rdi, JSAMPARRAY [rdi]   ; output_data = *output_data_ptr
.rowloop:
        push    rax                     ; save colctr across the column loop
        push    rdi
        push    rsi

        mov     rsi, JSAMPROW [rsi]     ; inptr = current input row
        mov     rdi, JSAMPROW [rdi]     ; outptr = current output row

        ; If the width is not a multiple of 16, replicate the last real
        ; sample one position past the end so the final partial XMM load
        ; reads a defined value.
        test    rax, SIZEOF_XMMWORD-1
        jz      short .skip
        mov     dl, JSAMPLE [rsi+(rax-1)*SIZEOF_JSAMPLE]
        mov     JSAMPLE [rsi+rax*SIZEOF_JSAMPLE], dl    ; insert a dummy sample
.skip:
        pxor    xmm0,xmm0               ; xmm0=(all 0's), zero for byte->word unpack
        ; Seed the "previous neighbor" register: byte 0 of the row doubles as
        ; its own left neighbor (edge replication).
        pcmpeqb xmm7,xmm7
        psrldq  xmm7,(SIZEOF_XMMWORD-1) ; mask = 0x000...0FF (lowest byte only)
        pand    xmm7, XMMWORD [rsi+0*SIZEOF_XMMWORD]

        ; Round colctr up to a whole number of XMM blocks.
        add     rax, byte SIZEOF_XMMWORD-1
        and     rax, byte -SIZEOF_XMMWORD
        cmp     rax, byte SIZEOF_XMMWORD
        ja      short .columnloop       ; more than one block left

.columnloop_last:
        ; Last block: the right neighbor of byte 15 is byte 15 itself
        ; (edge replication), isolated into the top byte of xmm6.
        pcmpeqb xmm6,xmm6
        pslldq  xmm6,(SIZEOF_XMMWORD-1)
        pand    xmm6, XMMWORD [rsi+0*SIZEOF_XMMWORD]
        jmp     short .upsample

.columnloop:
        ; Interior block: the right neighbor of byte 15 is byte 0 of the
        ; NEXT block, moved into the top byte position of xmm6.
        movdqa  xmm6, XMMWORD [rsi+1*SIZEOF_XMMWORD]
        pslldq  xmm6,(SIZEOF_XMMWORD-1)

.upsample:
        movdqa  xmm1, XMMWORD [rsi+0*SIZEOF_XMMWORD]
        movdqa  xmm2,xmm1
        movdqa  xmm3,xmm1               ; xmm1=( 0 1 2 ... 13 14 15)
        pslldq  xmm2,1                  ; xmm2=(-- 0 1 ... 12 13 14)
        psrldq  xmm3,1                  ; xmm3=( 1 2 3 ... 14 15 --)

        por     xmm2,xmm7               ; xmm2=(-1 0 1 ... 12 13 14), left neighbors
        por     xmm3,xmm6               ; xmm3=( 1 2 3 ... 14 15 16), right neighbors

        ; Carry byte 15 forward as the left neighbor of the next block.
        movdqa  xmm7,xmm1
        psrldq  xmm7,(SIZEOF_XMMWORD-1) ; xmm7=(15 -- -- ... -- -- --)

        ; Unpack bytes to words so the weighted sums cannot overflow.
        movdqa  xmm4,xmm1
        punpcklbw xmm1,xmm0             ; xmm1=( 0 1 2 3 4 5 6 7)
        punpckhbw xmm4,xmm0             ; xmm4=( 8 9 10 11 12 13 14 15)
        movdqa  xmm5,xmm2
        punpcklbw xmm2,xmm0             ; xmm2=(-1 0 1 2 3 4 5 6)
        punpckhbw xmm5,xmm0             ; xmm5=( 7 8 9 10 11 12 13 14)
        movdqa  xmm6,xmm3
        punpcklbw xmm3,xmm0             ; xmm3=( 1 2 3 4 5 6 7 8)
        punpckhbw xmm6,xmm0             ; xmm6=( 9 10 11 12 13 14 15 16)

        ; out_even = (3*cur + left + 1) >> 2;  out_odd = (3*cur + right + 2) >> 2
        pmullw  xmm1,[rel PW_THREE]
        pmullw  xmm4,[rel PW_THREE]
        paddw   xmm2,[rel PW_ONE]
        paddw   xmm5,[rel PW_ONE]
        paddw   xmm3,[rel PW_TWO]
        paddw   xmm6,[rel PW_TWO]

        paddw   xmm2,xmm1
        paddw   xmm5,xmm4
        psrlw   xmm2,2                  ; xmm2=OutLE=( 0 2 4 6 8 10 12 14)
        psrlw   xmm5,2                  ; xmm5=OutHE=(16 18 20 22 24 26 28 30)
        paddw   xmm3,xmm1
        paddw   xmm6,xmm4
        psrlw   xmm3,2                  ; xmm3=OutLO=( 1 3 5 7 9 11 13 15)
        psrlw   xmm6,2                  ; xmm6=OutHO=(17 19 21 23 25 27 29 31)

        ; Interleave even/odd results back into bytes: odd samples go into
        ; the high byte of each word, then OR merges the pairs.
        psllw   xmm3,BYTE_BIT
        psllw   xmm6,BYTE_BIT
        por     xmm2,xmm3               ; xmm2=OutL=( 0 1 2 ... 13 14 15)
        por     xmm5,xmm6               ; xmm5=OutH=(16 17 18 ... 29 30 31)

        movdqa  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm5

        sub     rax, byte SIZEOF_XMMWORD
        add     rsi, byte 1*SIZEOF_XMMWORD      ; inptr
        add     rdi, byte 2*SIZEOF_XMMWORD      ; outptr (2 output blocks per input block)
        cmp     rax, byte SIZEOF_XMMWORD
        ja      near .columnloop
        test    eax,eax
        jnz     near .columnloop_last   ; exactly one block remains

        pop     rsi
        pop     rdi
        pop     rax

        add     rsi, byte SIZEOF_JSAMPROW       ; input_data: next row
        add     rdi, byte SIZEOF_JSAMPROW       ; output_data: next row
        dec     rcx                             ; rowctr
        jg      near .rowloop

.return:
        uncollect_args
        pop     rbp
        ret

; --------------------------------------------------------------------------
;
; Fancy processing for the common case of 2:1 horizontal and 2:1 vertical.
; Again a triangle filter; see comments for h2v1 case, above.
;
; GLOBAL(void)
; jsimd_h2v2_fancy_upsample_sse2 (int max_v_samp_factor,
;                                 JDIMENSION downsampled_width,
;                                 JSAMPARRAY input_data,
;                                 JSAMPARRAY * output_data_ptr);
;
; Produces TWO output rows per input row: each output row is first blended
; vertically (3*this_row + adjacent_row), then the intermediate is expanded
; horizontally exactly as in the h2v1 case, with a final >> 4 (the 3x3/16
; triangle-filter weights).

; Argument registers after collect_args (see jsimdext.inc):
; r10 = int max_v_samp_factor
; r11 = JDIMENSION downsampled_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY * output_data_ptr

; Four XMM-sized stack temporaries, addressed relative to the aligned rbp:
; wk(0)/wk(1) hold the left-edge carry for the upper/lower intermediate rows,
; wk(2)/wk(3) hold the right-edge carry for the upper/lower intermediate rows.
%define wk(i)   rbp-(WK_NUM-(i))*SIZEOF_XMMWORD ; xmmword wk[WK_NUM]
%define WK_NUM  4

        align   16
        global  EXTN(jsimd_h2v2_fancy_upsample_sse2)

EXTN(jsimd_h2v2_fancy_upsample_sse2):
        push    rbp
        mov     rax,rsp                         ; rax = original rbp
        sub     rsp, byte 4
        and     rsp, byte (-SIZEOF_XMMWORD)     ; align to 128 bits
        mov     [rsp],rax                       ; stash original rsp value for epilogue
        mov     rbp,rsp                         ; rbp = aligned rbp
        lea     rsp, [wk(0)]                    ; reserve wk[] below the aligned frame
        collect_args
        push    rbx                             ; rbx is callee-saved; used as inptr0

        mov     rax, r11                ; colctr = downsampled_width
        test    rax,rax
        jz      near .return

        mov     rcx, r10                ; rowctr = max_v_samp_factor
        test    rcx,rcx
        jz      near .return

        mov     rsi, r12                ; input_data
        mov     rdi, r13
        mov     rdi, JSAMPARRAY [rdi]   ; output_data = *output_data_ptr
.rowloop:
        push    rax                     ; save colctr
        push    rcx
        push    rdi
        push    rsi

        ; Three input rows (center plus vertical neighbors), two output rows.
        mov     rcx, JSAMPROW [rsi-1*SIZEOF_JSAMPROW]   ; inptr1(above)
        mov     rbx, JSAMPROW [rsi+0*SIZEOF_JSAMPROW]   ; inptr0
        mov     rsi, JSAMPROW [rsi+1*SIZEOF_JSAMPROW]   ; inptr1(below)
        mov     rdx, JSAMPROW [rdi+0*SIZEOF_JSAMPROW]   ; outptr0
        mov     rdi, JSAMPROW [rdi+1*SIZEOF_JSAMPROW]   ; outptr1

        ; Pad all three input rows with a replicated last sample when the
        ; width is not a multiple of 16 (see h2v1 case).
        test    rax, SIZEOF_XMMWORD-1
        jz      short .skip
        push    rdx                     ; dl is scratch; rdx holds outptr0
        mov     dl, JSAMPLE [rcx+(rax-1)*SIZEOF_JSAMPLE]
        mov     JSAMPLE [rcx+rax*SIZEOF_JSAMPLE], dl
        mov     dl, JSAMPLE [rbx+(rax-1)*SIZEOF_JSAMPLE]
        mov     JSAMPLE [rbx+rax*SIZEOF_JSAMPLE], dl
        mov     dl, JSAMPLE [rsi+(rax-1)*SIZEOF_JSAMPLE]
        mov     JSAMPLE [rsi+rax*SIZEOF_JSAMPLE], dl    ; insert a dummy sample
        pop     rdx
.skip:
        ; -- process the first column block

        movdqa  xmm0, XMMWORD [rbx+0*SIZEOF_XMMWORD]    ; xmm0=row[ 0][0]
        movdqa  xmm1, XMMWORD [rcx+0*SIZEOF_XMMWORD]    ; xmm1=row[-1][0]
        movdqa  xmm2, XMMWORD [rsi+0*SIZEOF_XMMWORD]    ; xmm2=row[+1][0]

        pxor    xmm3,xmm3               ; xmm3=(all 0's)
        movdqa  xmm4,xmm0
        punpcklbw xmm0,xmm3             ; xmm0=row[ 0]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm4,xmm3             ; xmm4=row[ 0]( 8 9 10 11 12 13 14 15)
        movdqa  xmm5,xmm1
        punpcklbw xmm1,xmm3             ; xmm1=row[-1]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm5,xmm3             ; xmm5=row[-1]( 8 9 10 11 12 13 14 15)
        movdqa  xmm6,xmm2
        punpcklbw xmm2,xmm3             ; xmm2=row[+1]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm6,xmm3             ; xmm6=row[+1]( 8 9 10 11 12 13 14 15)

        ; Vertical blend: Int = 3*row[0] + row[-1] (upper) / + row[+1] (lower).
        pmullw  xmm0,[rel PW_THREE]
        pmullw  xmm4,[rel PW_THREE]

        pcmpeqb xmm7,xmm7
        psrldq  xmm7,(SIZEOF_XMMWORD-2) ; word mask for element 0 only

        paddw   xmm1,xmm0               ; xmm1=Int0L=( 0 1 2 3 4 5 6 7)
        paddw   xmm5,xmm4               ; xmm5=Int0H=( 8 9 10 11 12 13 14 15)
        paddw   xmm2,xmm0               ; xmm2=Int1L=( 0 1 2 3 4 5 6 7)
        paddw   xmm6,xmm4               ; xmm6=Int1H=( 8 9 10 11 12 13 14 15)

        ; The output rows double as scratch space for the word-sized
        ; intermediates until the horizontal pass overwrites them.
        movdqa  XMMWORD [rdx+0*SIZEOF_XMMWORD], xmm1    ; temporarily save
        movdqa  XMMWORD [rdx+1*SIZEOF_XMMWORD], xmm5    ; the intermediate data
        movdqa  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm6

        ; Left-edge carry: element 0 doubles as its own left neighbor.
        pand    xmm1,xmm7               ; xmm1=( 0 -- -- -- -- -- -- --)
        pand    xmm2,xmm7               ; xmm2=( 0 -- -- -- -- -- -- --)

        movdqa  XMMWORD [wk(0)], xmm1
        movdqa  XMMWORD [wk(1)], xmm2

        ; Round colctr up to a whole number of XMM blocks.
        add     rax, byte SIZEOF_XMMWORD-1
        and     rax, byte -SIZEOF_XMMWORD
        cmp     rax, byte SIZEOF_XMMWORD
        ja      short .columnloop

.columnloop_last:
        ; -- process the last column block

        ; Right-edge carry: element 15 doubles as its own right neighbor.
        pcmpeqb xmm1,xmm1
        pslldq  xmm1,(SIZEOF_XMMWORD-2)
        movdqa  xmm2,xmm1

        pand    xmm1, XMMWORD [rdx+1*SIZEOF_XMMWORD]
        pand    xmm2, XMMWORD [rdi+1*SIZEOF_XMMWORD]

        movdqa  XMMWORD [wk(2)], xmm1   ; xmm1=(-- -- -- -- -- -- -- 15)
        movdqa  XMMWORD [wk(3)], xmm2   ; xmm2=(-- -- -- -- -- -- -- 15)

        jmp     near .upsample

.columnloop:
        ; -- process the next column block
        ; Same vertical blend as the first block, one XMM block further right;
        ; also captures element 16 (next block's element 0) as the right-edge
        ; carry for the current block.

        movdqa  xmm0, XMMWORD [rbx+1*SIZEOF_XMMWORD]    ; xmm0=row[ 0][1]
        movdqa  xmm1, XMMWORD [rcx+1*SIZEOF_XMMWORD]    ; xmm1=row[-1][1]
        movdqa  xmm2, XMMWORD [rsi+1*SIZEOF_XMMWORD]    ; xmm2=row[+1][1]

        pxor    xmm3,xmm3               ; xmm3=(all 0's)
        movdqa  xmm4,xmm0
        punpcklbw xmm0,xmm3             ; xmm0=row[ 0]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm4,xmm3             ; xmm4=row[ 0]( 8 9 10 11 12 13 14 15)
        movdqa  xmm5,xmm1
        punpcklbw xmm1,xmm3             ; xmm1=row[-1]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm5,xmm3             ; xmm5=row[-1]( 8 9 10 11 12 13 14 15)
        movdqa  xmm6,xmm2
        punpcklbw xmm2,xmm3             ; xmm2=row[+1]( 0 1 2 3 4 5 6 7)
        punpckhbw xmm6,xmm3             ; xmm6=row[+1]( 8 9 10 11 12 13 14 15)

        pmullw  xmm0,[rel PW_THREE]
        pmullw  xmm4,[rel PW_THREE]

        paddw   xmm1,xmm0               ; xmm1=Int0L=( 0 1 2 3 4 5 6 7)
        paddw   xmm5,xmm4               ; xmm5=Int0H=( 8 9 10 11 12 13 14 15)
        paddw   xmm2,xmm0               ; xmm2=Int1L=( 0 1 2 3 4 5 6 7)
        paddw   xmm6,xmm4               ; xmm6=Int1H=( 8 9 10 11 12 13 14 15)

        movdqa  XMMWORD [rdx+2*SIZEOF_XMMWORD], xmm1    ; temporarily save
        movdqa  XMMWORD [rdx+3*SIZEOF_XMMWORD], xmm5    ; the intermediate data
        movdqa  XMMWORD [rdi+2*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rdi+3*SIZEOF_XMMWORD], xmm6

        pslldq  xmm1,(SIZEOF_XMMWORD-2) ; xmm1=(-- -- -- -- -- -- -- 0)
        pslldq  xmm2,(SIZEOF_XMMWORD-2) ; xmm2=(-- -- -- -- -- -- -- 0)

        movdqa  XMMWORD [wk(2)], xmm1
        movdqa  XMMWORD [wk(3)], xmm2

.upsample:
        ; -- process the upper row
        ; Horizontal triangle filter over the word intermediates:
        ; out_even = (3*Int + left + 8) >> 4;  out_odd = (3*Int + right + 7) >> 4

        movdqa  xmm7, XMMWORD [rdx+0*SIZEOF_XMMWORD]
        movdqa  xmm3, XMMWORD [rdx+1*SIZEOF_XMMWORD]

        movdqa  xmm0,xmm7               ; xmm7=Int0L=( 0 1 2 3 4 5 6 7)
        movdqa  xmm4,xmm3               ; xmm3=Int0H=( 8 9 10 11 12 13 14 15)
        psrldq  xmm0,2                  ; xmm0=( 1 2 3 4 5 6 7 --)
        pslldq  xmm4,(SIZEOF_XMMWORD-2) ; xmm4=(-- -- -- -- -- -- -- 8)
        movdqa  xmm5,xmm7
        movdqa  xmm6,xmm3
        psrldq  xmm5,(SIZEOF_XMMWORD-2) ; xmm5=( 7 -- -- -- -- -- -- --)
        pslldq  xmm6,2                  ; xmm6=(-- 8 9 10 11 12 13 14)

        por     xmm0,xmm4               ; xmm0=( 1 2 3 4 5 6 7 8)
        por     xmm5,xmm6               ; xmm5=( 7 8 9 10 11 12 13 14)

        movdqa  xmm1,xmm7
        movdqa  xmm2,xmm3
        pslldq  xmm1,2                  ; xmm1=(-- 0 1 2 3 4 5 6)
        psrldq  xmm2,2                  ; xmm2=( 9 10 11 12 13 14 15 --)
        movdqa  xmm4,xmm3
        psrldq  xmm4,(SIZEOF_XMMWORD-2) ; xmm4=(15 -- -- -- -- -- -- --)

        por     xmm1, XMMWORD [wk(0)]   ; xmm1=(-1 0 1 2 3 4 5 6)
        por     xmm2, XMMWORD [wk(2)]   ; xmm2=( 9 10 11 12 13 14 15 16)

        movdqa  XMMWORD [wk(0)], xmm4   ; carry element 15 to the next block

        pmullw  xmm7,[rel PW_THREE]
        pmullw  xmm3,[rel PW_THREE]
        paddw   xmm1,[rel PW_EIGHT]
        paddw   xmm5,[rel PW_EIGHT]
        paddw   xmm0,[rel PW_SEVEN]
        paddw   xmm2,[rel PW_SEVEN]

        paddw   xmm1,xmm7
        paddw   xmm5,xmm3
        psrlw   xmm1,4                  ; xmm1=Out0LE=( 0 2 4 6 8 10 12 14)
        psrlw   xmm5,4                  ; xmm5=Out0HE=(16 18 20 22 24 26 28 30)
        paddw   xmm0,xmm7
        paddw   xmm2,xmm3
        psrlw   xmm0,4                  ; xmm0=Out0LO=( 1 3 5 7 9 11 13 15)
        psrlw   xmm2,4                  ; xmm2=Out0HO=(17 19 21 23 25 27 29 31)

        ; Merge even/odd words back into interleaved bytes.
        psllw   xmm0,BYTE_BIT
        psllw   xmm2,BYTE_BIT
        por     xmm1,xmm0               ; xmm1=Out0L=( 0 1 2 ... 13 14 15)
        por     xmm5,xmm2               ; xmm5=Out0H=(16 17 18 ... 29 30 31)

        movdqa  XMMWORD [rdx+0*SIZEOF_XMMWORD], xmm1
        movdqa  XMMWORD [rdx+1*SIZEOF_XMMWORD], xmm5

        ; -- process the lower row
        ; Identical horizontal pass over the lower-row intermediates.

        movdqa  xmm6, XMMWORD [rdi+0*SIZEOF_XMMWORD]
        movdqa  xmm4, XMMWORD [rdi+1*SIZEOF_XMMWORD]

        movdqa  xmm7,xmm6               ; xmm6=Int1L=( 0 1 2 3 4 5 6 7)
        movdqa  xmm3,xmm4               ; xmm4=Int1H=( 8 9 10 11 12 13 14 15)
        psrldq  xmm7,2                  ; xmm7=( 1 2 3 4 5 6 7 --)
        pslldq  xmm3,(SIZEOF_XMMWORD-2) ; xmm3=(-- -- -- -- -- -- -- 8)
        movdqa  xmm0,xmm6
        movdqa  xmm2,xmm4
        psrldq  xmm0,(SIZEOF_XMMWORD-2) ; xmm0=( 7 -- -- -- -- -- -- --)
        pslldq  xmm2,2                  ; xmm2=(-- 8 9 10 11 12 13 14)

        por     xmm7,xmm3               ; xmm7=( 1 2 3 4 5 6 7 8)
        por     xmm0,xmm2               ; xmm0=( 7 8 9 10 11 12 13 14)

        movdqa  xmm1,xmm6
        movdqa  xmm5,xmm4
        pslldq  xmm1,2                  ; xmm1=(-- 0 1 2 3 4 5 6)
        psrldq  xmm5,2                  ; xmm5=( 9 10 11 12 13 14 15 --)
        movdqa  xmm3,xmm4
        psrldq  xmm3,(SIZEOF_XMMWORD-2) ; xmm3=(15 -- -- -- -- -- -- --)

        por     xmm1, XMMWORD [wk(1)]   ; xmm1=(-1 0 1 2 3 4 5 6)
        por     xmm5, XMMWORD [wk(3)]   ; xmm5=( 9 10 11 12 13 14 15 16)

        movdqa  XMMWORD [wk(1)], xmm3   ; carry element 15 to the next block

        pmullw  xmm6,[rel PW_THREE]
        pmullw  xmm4,[rel PW_THREE]
        paddw   xmm1,[rel PW_EIGHT]
        paddw   xmm0,[rel PW_EIGHT]
        paddw   xmm7,[rel PW_SEVEN]
        paddw   xmm5,[rel PW_SEVEN]

        paddw   xmm1,xmm6
        paddw   xmm0,xmm4
        psrlw   xmm1,4                  ; xmm1=Out1LE=( 0 2 4 6 8 10 12 14)
        psrlw   xmm0,4                  ; xmm0=Out1HE=(16 18 20 22 24 26 28 30)
        paddw   xmm7,xmm6
        paddw   xmm5,xmm4
        psrlw   xmm7,4                  ; xmm7=Out1LO=( 1 3 5 7 9 11 13 15)
        psrlw   xmm5,4                  ; xmm5=Out1HO=(17 19 21 23 25 27 29 31)

        psllw   xmm7,BYTE_BIT
        psllw   xmm5,BYTE_BIT
        por     xmm1,xmm7               ; xmm1=Out1L=( 0 1 2 ... 13 14 15)
        por     xmm0,xmm5               ; xmm0=Out1H=(16 17 18 ... 29 30 31)

        movdqa  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm1
        movdqa  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm0

        sub     rax, byte SIZEOF_XMMWORD
        add     rcx, byte 1*SIZEOF_XMMWORD      ; inptr1(above)
        add     rbx, byte 1*SIZEOF_XMMWORD      ; inptr0
        add     rsi, byte 1*SIZEOF_XMMWORD      ; inptr1(below)
        add     rdx, byte 2*SIZEOF_XMMWORD      ; outptr0
        add     rdi, byte 2*SIZEOF_XMMWORD      ; outptr1
        cmp     rax, byte SIZEOF_XMMWORD
        ja      near .columnloop
        test    rax,rax
        jnz     near .columnloop_last   ; exactly one block remains

        pop     rsi
        pop     rdi
        pop     rcx
        pop     rax

        add     rsi, byte 1*SIZEOF_JSAMPROW     ; input_data: next input row
        add     rdi, byte 2*SIZEOF_JSAMPROW     ; output_data: two rows emitted
        sub     rcx, byte 2                     ; rowctr
        jg      near .rowloop

.return:
        pop     rbx
        uncollect_args
        mov     rsp,rbp         ; rsp <- aligned rbp
        pop     rsp             ; rsp <- original rbp
        pop     rbp
        ret

; --------------------------------------------------------------------------
;
; Fast processing for the common case of 2:1 horizontal and 1:1 vertical.
; It's still a box filter.
;
; GLOBAL(void)
; jsimd_h2v1_upsample_sse2 (int max_v_samp_factor,
;                           JDIMENSION output_width,
;                           JSAMPARRAY input_data,
;                           JSAMPARRAY * output_data_ptr);
;
; Box filter: each input sample is simply duplicated horizontally
; (punpcklbw/punpckhbw of a register with itself doubles every byte).

; Argument registers after collect_args (see jsimdext.inc):
; r10 = int max_v_samp_factor
; r11 = JDIMENSION output_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY * output_data_ptr

        align   16
        global  EXTN(jsimd_h2v1_upsample_sse2)

EXTN(jsimd_h2v1_upsample_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
        collect_args

        mov     rdx, r11
        add     rdx, byte (2*SIZEOF_XMMWORD)-1  ; round output_width up to
        and     rdx, byte -(2*SIZEOF_XMMWORD)   ; a 2-XMM-block multiple
        jz      near .return            ; ZF from the AND: zero width

        mov     rcx, r10                ; rowctr
        test    rcx,rcx
        jz      short .return

        mov     rsi, r12                ; input_data
        mov     rdi, r13
        mov     rdi, JSAMPARRAY [rdi]   ; output_data = *output_data_ptr
.rowloop:
        push    rdi
        push    rsi

        mov     rsi, JSAMPROW [rsi]     ; inptr
        mov     rdi, JSAMPROW [rdi]     ; outptr
        mov     rax,rdx                 ; colctr
.columnloop:

        movdqa  xmm0, XMMWORD [rsi+0*SIZEOF_XMMWORD]

        ; Duplicate each byte: unpacking a register with itself yields
        ; (b0 b0 b1 b1 ...) in the low half and (b8 b8 b9 b9 ...) in the high.
        movdqa  xmm1,xmm0
        punpcklbw xmm0,xmm0
        punpckhbw xmm1,xmm1

        movdqa  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm0
        movdqa  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm1

        sub     rax, byte 2*SIZEOF_XMMWORD
        jz      short .nextrow

        ; Second unrolled iteration.
        movdqa  xmm2, XMMWORD [rsi+1*SIZEOF_XMMWORD]

        movdqa  xmm3,xmm2
        punpcklbw xmm2,xmm2
        punpckhbw xmm3,xmm3

        movdqa  XMMWORD [rdi+2*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rdi+3*SIZEOF_XMMWORD], xmm3

        sub     rax, byte 2*SIZEOF_XMMWORD
        jz      short .nextrow

        add     rsi, byte 2*SIZEOF_XMMWORD      ; inptr
        add     rdi, byte 4*SIZEOF_XMMWORD      ; outptr
        jmp     short .columnloop

.nextrow:
        pop     rsi
        pop     rdi

        add     rsi, byte SIZEOF_JSAMPROW       ; input_data
        add     rdi, byte SIZEOF_JSAMPROW       ; output_data
        dec     rcx                             ; rowctr
        jg      short .rowloop

.return:
        uncollect_args
        pop     rbp
        ret

; --------------------------------------------------------------------------
;
; Fast processing for the common case of 2:1 horizontal and 2:1 vertical.
; It's still a box filter.
;
; GLOBAL(void)
; jsimd_h2v2_upsample_sse2 (int max_v_samp_factor,
;                           JDIMENSION output_width,
;                           JSAMPARRAY input_data,
;                           JSAMPARRAY * output_data_ptr);
;
; Same horizontal duplication as h2v1, but each expanded row is written to
; TWO consecutive output rows (vertical duplication).

; Argument registers after collect_args (see jsimdext.inc):
; r10 = int max_v_samp_factor
; r11 = JDIMENSION output_width
; r12 = JSAMPARRAY input_data
; r13 = JSAMPARRAY * output_data_ptr

        align   16
        global  EXTN(jsimd_h2v2_upsample_sse2)

EXTN(jsimd_h2v2_upsample_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
        collect_args
        push    rbx                     ; rbx is callee-saved; used as outptr0

        mov     rdx, r11
        add     rdx, byte (2*SIZEOF_XMMWORD)-1  ; round output_width up to
        and     rdx, byte -(2*SIZEOF_XMMWORD)   ; a 2-XMM-block multiple
        jz      near .return            ; ZF from the AND: zero width

        mov     rcx, r10                ; rowctr
        test    rcx,rcx
        jz      near .return

        mov     rsi, r12                ; input_data
        mov     rdi, r13
        mov     rdi, JSAMPARRAY [rdi]   ; output_data = *output_data_ptr
.rowloop:
        push    rdi
        push    rsi

        mov     rsi, JSAMPROW [rsi]                     ; inptr
        mov     rbx, JSAMPROW [rdi+0*SIZEOF_JSAMPROW]   ; outptr0
        mov     rdi, JSAMPROW [rdi+1*SIZEOF_JSAMPROW]   ; outptr1
        mov     rax,rdx                                 ; colctr
.columnloop:

        movdqa  xmm0, XMMWORD [rsi+0*SIZEOF_XMMWORD]

        movdqa  xmm1,xmm0
        punpcklbw xmm0,xmm0
        punpckhbw xmm1,xmm1

        ; Store the doubled samples to both output rows.
        movdqa  XMMWORD [rbx+0*SIZEOF_XMMWORD], xmm0
        movdqa  XMMWORD [rbx+1*SIZEOF_XMMWORD], xmm1
        movdqa  XMMWORD [rdi+0*SIZEOF_XMMWORD], xmm0
        movdqa  XMMWORD [rdi+1*SIZEOF_XMMWORD], xmm1

        sub     rax, byte 2*SIZEOF_XMMWORD
        jz      short .nextrow

        ; Second unrolled iteration.
        movdqa  xmm2, XMMWORD [rsi+1*SIZEOF_XMMWORD]

        movdqa  xmm3,xmm2
        punpcklbw xmm2,xmm2
        punpckhbw xmm3,xmm3

        movdqa  XMMWORD [rbx+2*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rbx+3*SIZEOF_XMMWORD], xmm3
        movdqa  XMMWORD [rdi+2*SIZEOF_XMMWORD], xmm2
        movdqa  XMMWORD [rdi+3*SIZEOF_XMMWORD], xmm3

        sub     rax, byte 2*SIZEOF_XMMWORD
        jz      short .nextrow

        add     rsi, byte 2*SIZEOF_XMMWORD      ; inptr
        add     rbx, byte 4*SIZEOF_XMMWORD      ; outptr0
        add     rdi, byte 4*SIZEOF_XMMWORD      ; outptr1
        jmp     short .columnloop

.nextrow:
        pop     rsi
        pop     rdi

        add     rsi, byte 1*SIZEOF_JSAMPROW     ; input_data: next input row
        add     rdi, byte 2*SIZEOF_JSAMPROW     ; output_data: two rows emitted
        sub     rcx, byte 2                     ; rowctr
        jg      near .rowloop

.return:
        pop     rbx
        uncollect_args
        pop     rbp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align   16