;
; jcclrss2-64.asm - colorspace conversion (64-bit SSE2)
;
; x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; Copyright (C) 2009, D. R. Commander.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler); it can
; *not* be assembled with Microsoft's MASM or any compatible assembler
; (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; [TAB8]

%include "jcolsamp.inc"

; --------------------------------------------------------------------------
;
; Convert some rows of samples to the output colorspace.
;
; GLOBAL(void)
; jsimd_rgb_ycc_convert_sse2 (JDIMENSION img_width,
;                             JSAMPARRAY input_buf, JSAMPIMAGE output_buf,
;                             JDIMENSION output_row, int num_rows);
;

; r10 = JDIMENSION img_width
; r11 = JSAMPARRAY input_buf
; r12 = JSAMPIMAGE output_buf
; r13 = JDIMENSION output_row
; r14 = int num_rows

%define wk(i)   rbp-(WK_NUM-(i))*SIZEOF_XMMWORD ; xmmword wk[WK_NUM]
%define WK_NUM  8

        align   16

        global  EXTN(jsimd_rgb_ycc_convert_sse2)

EXTN(jsimd_rgb_ycc_convert_sse2):
        push    rbp
        mov     rax,rsp                         ; rax = original rbp
        sub     rsp, byte 4
        and     rsp, byte (-SIZEOF_XMMWORD)     ; align to 128 bits
        mov     [rsp],rax
        mov     rbp,rsp                         ; rbp = aligned rbp
        lea     rsp, [wk(0)]
        collect_args
        push    rbx

        mov     rcx, r10
        test    rcx,rcx
        jz      near .return

        push    rcx

        mov     rsi, r12
        mov     rcx, r13
        mov     rdi, JSAMPARRAY [rsi+0*SIZEOF_JSAMPARRAY]
        mov     rbx, JSAMPARRAY [rsi+1*SIZEOF_JSAMPARRAY]
        mov     rdx, JSAMPARRAY [rsi+2*SIZEOF_JSAMPARRAY]
        lea     rdi, [rdi+rcx*SIZEOF_JSAMPROW]
        lea     rbx, [rbx+rcx*SIZEOF_JSAMPROW]
        lea     rdx, [rdx+rcx*SIZEOF_JSAMPROW]

        pop     rcx

        mov     rsi, r11
        mov     eax, r14d
        test    rax,rax
        jle     near .return
.rowloop:
        push    rdx
        push    rbx
        push    rdi
        push    rsi
        push    rcx                             ; col

        mov     rsi, JSAMPROW [rsi]             ; inptr
        mov     rdi, JSAMPROW [rdi]             ; outptr0
        mov     rbx, JSAMPROW [rbx]             ; outptr1
        mov     rdx, JSAMPROW [rdx]             ; outptr2

        cmp     rcx, byte SIZEOF_XMMWORD
        jae     near .columnloop
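        ; If fewer than SIZEOF_XMMWORD pixels remain in this row, fall
        ; through to the .column_ldN ladder below: it tests the remaining
        ; count bit by bit and gathers exactly the bytes that are left
        ; (never reading past the end of the input row) into full XMM
        ; registers, so that .rgb_ycc_cnv can handle a ragged tail the
        ; same way as a complete block of SIZEOF_XMMWORD pixels.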
%if RGB_PIXELSIZE == 3 ; ---------------

.column_ld1:
        push    rax
        push    rdx
        lea     rcx,[rcx+rcx*2]         ; imul ecx,RGB_PIXELSIZE
        test    cl, SIZEOF_BYTE
        jz      short .column_ld2
        sub     rcx, byte SIZEOF_BYTE
        movzx   rax, BYTE [rsi+rcx]
.column_ld2:
        test    cl, SIZEOF_WORD
        jz      short .column_ld4
        sub     rcx, byte SIZEOF_WORD
        movzx   rdx, WORD [rsi+rcx]
        shl     rax, WORD_BIT
        or      rax,rdx
.column_ld4:
        movd    xmmA,eax
        pop     rdx
        pop     rax
        test    cl, SIZEOF_DWORD
        jz      short .column_ld8
        sub     rcx, byte SIZEOF_DWORD
        movd    xmmF, XMM_DWORD [rsi+rcx]
        pslldq  xmmA, SIZEOF_DWORD
        por     xmmA,xmmF
.column_ld8:
        test    cl, SIZEOF_MMWORD
        jz      short .column_ld16
        sub     rcx, byte SIZEOF_MMWORD
        movq    xmmB, XMM_MMWORD [rsi+rcx]
        pslldq  xmmA, SIZEOF_MMWORD
        por     xmmA,xmmB
.column_ld16:
        test    cl, SIZEOF_XMMWORD
        jz      short .column_ld32
        movdqa  xmmF,xmmA
        movdqu  xmmA, XMMWORD [rsi+0*SIZEOF_XMMWORD]
        mov     rcx, SIZEOF_XMMWORD
        jmp     short .rgb_ycc_cnv
.column_ld32:
        test    cl, 2*SIZEOF_XMMWORD
        mov     rcx, SIZEOF_XMMWORD
        jz      short .rgb_ycc_cnv
        movdqa  xmmB,xmmA
        movdqu  xmmA, XMMWORD [rsi+0*SIZEOF_XMMWORD]
        movdqu  xmmF, XMMWORD [rsi+1*SIZEOF_XMMWORD]
        jmp     short .rgb_ycc_cnv

.columnloop:
        movdqu  xmmA, XMMWORD [rsi+0*SIZEOF_XMMWORD]
        movdqu  xmmF, XMMWORD [rsi+1*SIZEOF_XMMWORD]
        movdqu  xmmB, XMMWORD [rsi+2*SIZEOF_XMMWORD]

.rgb_ycc_cnv:
        ; xmmA=(00 10 20 01 11 21 02 12 22 03 13 23 04 14 24 05)
        ; xmmF=(15 25 06 16 26 07 17 27 08 18 28 09 19 29 0A 1A)
        ; xmmB=(2A 0B 1B 2B 0C 1C 2C 0D 1D 2D 0E 1E 2E 0F 1F 2F)
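        ; In the layout comments the first hex digit is the colour
        ; component (0-2, in memory order) and the second is the pixel
        ; index.  The shuffle sequence below repeatedly splits the
        ; registers in half (pslldq/psrldq by 8) and re-interleaves the
        ; bytes (punpcklbw/punpckhbw), regrouping the 48 packed samples
        ; until each register holds a single colour component of either
        ; the even- or the odd-numbered pixels; the final unpacks against
        ; the zeroed xmmH widen those bytes to 16-bit words for pmaddwd.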
        movdqa    xmmG,xmmA
        pslldq    xmmA,8        ; xmmA=(-- -- -- -- -- -- -- -- 00 10 20 01 11 21 02 12)
        psrldq    xmmG,8        ; xmmG=(22 03 13 23 04 14 24 05 -- -- -- -- -- -- -- --)

        punpckhbw xmmA,xmmF     ; xmmA=(00 08 10 18 20 28 01 09 11 19 21 29 02 0A 12 1A)
        pslldq    xmmF,8        ; xmmF=(-- -- -- -- -- -- -- -- 15 25 06 16 26 07 17 27)

        punpcklbw xmmG,xmmB     ; xmmG=(22 2A 03 0B 13 1B 23 2B 04 0C 14 1C 24 2C 05 0D)
        punpckhbw xmmF,xmmB     ; xmmF=(15 1D 25 2D 06 0E 16 1E 26 2E 07 0F 17 1F 27 2F)

        movdqa    xmmD,xmmA
        pslldq    xmmA,8        ; xmmA=(-- -- -- -- -- -- -- -- 00 08 10 18 20 28 01 09)
        psrldq    xmmD,8        ; xmmD=(11 19 21 29 02 0A 12 1A -- -- -- -- -- -- -- --)

        punpckhbw xmmA,xmmG     ; xmmA=(00 04 08 0C 10 14 18 1C 20 24 28 2C 01 05 09 0D)
        pslldq    xmmG,8        ; xmmG=(-- -- -- -- -- -- -- -- 22 2A 03 0B 13 1B 23 2B)

        punpcklbw xmmD,xmmF     ; xmmD=(11 15 19 1D 21 25 29 2D 02 06 0A 0E 12 16 1A 1E)
        punpckhbw xmmG,xmmF     ; xmmG=(22 26 2A 2E 03 07 0B 0F 13 17 1B 1F 23 27 2B 2F)

        movdqa    xmmE,xmmA
        pslldq    xmmA,8        ; xmmA=(-- -- -- -- -- -- -- -- 00 04 08 0C 10 14 18 1C)
        psrldq    xmmE,8        ; xmmE=(20 24 28 2C 01 05 09 0D -- -- -- -- -- -- -- --)

        punpckhbw xmmA,xmmD     ; xmmA=(00 02 04 06 08 0A 0C 0E 10 12 14 16 18 1A 1C 1E)
        pslldq    xmmD,8        ; xmmD=(-- -- -- -- -- -- -- -- 11 15 19 1D 21 25 29 2D)

        punpcklbw xmmE,xmmG     ; xmmE=(20 22 24 26 28 2A 2C 2E 01 03 05 07 09 0B 0D 0F)
        punpckhbw xmmD,xmmG     ; xmmD=(11 13 15 17 19 1B 1D 1F 21 23 25 27 29 2B 2D 2F)

        pxor      xmmH,xmmH

        movdqa    xmmC,xmmA
        punpcklbw xmmA,xmmH     ; xmmA=(00 02 04 06 08 0A 0C 0E)
        punpckhbw xmmC,xmmH     ; xmmC=(10 12 14 16 18 1A 1C 1E)

        movdqa    xmmB,xmmE
        punpcklbw xmmE,xmmH     ; xmmE=(20 22 24 26 28 2A 2C 2E)
        punpckhbw xmmB,xmmH     ; xmmB=(01 03 05 07 09 0B 0D 0F)

        movdqa    xmmF,xmmD
        punpcklbw xmmD,xmmH     ; xmmD=(11 13 15 17 19 1B 1D 1F)
        punpckhbw xmmF,xmmH     ; xmmF=(21 23 25 27 29 2B 2D 2F)

%else ; RGB_PIXELSIZE == 4 ; -----------

.column_ld1:
        test    cl, SIZEOF_XMMWORD/16
        jz      short .column_ld2
        sub     rcx, byte SIZEOF_XMMWORD/16
        movd    xmmA, XMM_DWORD [rsi+rcx*RGB_PIXELSIZE]
.column_ld2:
        test    cl, SIZEOF_XMMWORD/8
        jz      short .column_ld4
        sub     rcx, byte SIZEOF_XMMWORD/8
        movq    xmmE, XMM_MMWORD [rsi+rcx*RGB_PIXELSIZE]
        pslldq  xmmA, SIZEOF_MMWORD
        por     xmmA,xmmE
.column_ld4:
        test    cl, SIZEOF_XMMWORD/4
        jz      short .column_ld8
        sub     rcx, byte SIZEOF_XMMWORD/4
        movdqa  xmmE,xmmA
        movdqu  xmmA, XMMWORD [rsi+rcx*RGB_PIXELSIZE]
.column_ld8:
        test    cl, SIZEOF_XMMWORD/2
        mov     rcx, SIZEOF_XMMWORD
        jz      short .rgb_ycc_cnv
        movdqa  xmmF,xmmA
        movdqa  xmmH,xmmE
        movdqu  xmmA, XMMWORD [rsi+0*SIZEOF_XMMWORD]
        movdqu  xmmE, XMMWORD [rsi+1*SIZEOF_XMMWORD]
        jmp     short .rgb_ycc_cnv

.columnloop:
        movdqu  xmmA, XMMWORD [rsi+0*SIZEOF_XMMWORD]
        movdqu  xmmE, XMMWORD [rsi+1*SIZEOF_XMMWORD]
        movdqu  xmmF, XMMWORD [rsi+2*SIZEOF_XMMWORD]
        movdqu  xmmH, XMMWORD [rsi+3*SIZEOF_XMMWORD]

.rgb_ycc_cnv:
        ; xmmA=(00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33)
        ; xmmE=(04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37)
        ; xmmF=(08 18 28 38 09 19 29 39 0A 1A 2A 3A 0B 1B 2B 3B)
        ; xmmH=(0C 1C 2C 3C 0D 1D 2D 3D 0E 1E 2E 3E 0F 1F 2F 3F)
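        ; Same digit convention as above (component 0-3, then pixel
        ; index).  With 4-byte pixels each sample group occupies a full
        ; dword, so a plain unpack tree (punpcklbw/punpckhbw, then
        ; punpcklwd/punpckhwd, then punpcklbw/punpckhbw again) separates
        ; the components and splits them into even- and odd-numbered
        ; pixels; the closing unpacks against the zeroed xmmF, plus a
        ; psrlw by BYTE_BIT for the last two registers, widen the bytes
        ; to 16-bit words.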
        movdqa    xmmD,xmmA
        punpcklbw xmmA,xmmE     ; xmmA=(00 04 10 14 20 24 30 34 01 05 11 15 21 25 31 35)
        punpckhbw xmmD,xmmE     ; xmmD=(02 06 12 16 22 26 32 36 03 07 13 17 23 27 33 37)

        movdqa    xmmC,xmmF
        punpcklbw xmmF,xmmH     ; xmmF=(08 0C 18 1C 28 2C 38 3C 09 0D 19 1D 29 2D 39 3D)
        punpckhbw xmmC,xmmH     ; xmmC=(0A 0E 1A 1E 2A 2E 3A 3E 0B 0F 1B 1F 2B 2F 3B 3F)

        movdqa    xmmB,xmmA
        punpcklwd xmmA,xmmF     ; xmmA=(00 04 08 0C 10 14 18 1C 20 24 28 2C 30 34 38 3C)
        punpckhwd xmmB,xmmF     ; xmmB=(01 05 09 0D 11 15 19 1D 21 25 29 2D 31 35 39 3D)

        movdqa    xmmG,xmmD
        punpcklwd xmmD,xmmC     ; xmmD=(02 06 0A 0E 12 16 1A 1E 22 26 2A 2E 32 36 3A 3E)
        punpckhwd xmmG,xmmC     ; xmmG=(03 07 0B 0F 13 17 1B 1F 23 27 2B 2F 33 37 3B 3F)

        movdqa    xmmE,xmmA
        punpcklbw xmmA,xmmD     ; xmmA=(00 02 04 06 08 0A 0C 0E 10 12 14 16 18 1A 1C 1E)
        punpckhbw xmmE,xmmD     ; xmmE=(20 22 24 26 28 2A 2C 2E 30 32 34 36 38 3A 3C 3E)

        movdqa    xmmH,xmmB
        punpcklbw xmmB,xmmG     ; xmmB=(01 03 05 07 09 0B 0D 0F 11 13 15 17 19 1B 1D 1F)
        punpckhbw xmmH,xmmG     ; xmmH=(21 23 25 27 29 2B 2D 2F 31 33 35 37 39 3B 3D 3F)

        pxor      xmmF,xmmF

        movdqa    xmmC,xmmA
        punpcklbw xmmA,xmmF     ; xmmA=(00 02 04 06 08 0A 0C 0E)
        punpckhbw xmmC,xmmF     ; xmmC=(10 12 14 16 18 1A 1C 1E)

        movdqa    xmmD,xmmB
        punpcklbw xmmB,xmmF     ; xmmB=(01 03 05 07 09 0B 0D 0F)
        punpckhbw xmmD,xmmF     ; xmmD=(11 13 15 17 19 1B 1D 1F)

        movdqa    xmmG,xmmE
        punpcklbw xmmE,xmmF     ; xmmE=(20 22 24 26 28 2A 2C 2E)
        punpckhbw xmmG,xmmF     ; xmmG=(30 32 34 36 38 3A 3C 3E)

        punpcklbw xmmF,xmmH
        punpckhbw xmmH,xmmH
        psrlw     xmmF,BYTE_BIT ; xmmF=(21 23 25 27 29 2B 2D 2F)
        psrlw     xmmH,BYTE_BIT ; xmmH=(31 33 35 37 39 3B 3D 3F)

%endif ; RGB_PIXELSIZE ; ---------------

        ; xmm0=R(02468ACE)=RE, xmm2=G(02468ACE)=GE, xmm4=B(02468ACE)=BE
        ; xmm1=R(13579BDF)=RO, xmm3=G(13579BDF)=GO, xmm5=B(13579BDF)=BO

        ; (Original)
        ; Y  =  0.29900 * R + 0.58700 * G + 0.11400 * B
        ; Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + CENTERJSAMPLE
        ; Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B + CENTERJSAMPLE
        ;
        ; (This implementation)
        ; Y  =  0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G
        ; Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + CENTERJSAMPLE
        ; Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B + CENTERJSAMPLE
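        ; The arithmetic below is done in SCALEBITS-bit fixed point:
        ; FIX(x) stands for x scaled by 2^SCALEBITS (the coefficient and
        ; rounding constants, and SCALEBITS itself, come from
        ; jcolsamp.inc).  Splitting 0.58700*G into 0.33700*G + 0.25000*G
        ; keeps every coefficient within the signed 16-bit range that
        ; pmaddwd requires; pmaddwd multiplies the interleaved (R,G) and
        ; (B,G) word pairs and sums each pair into a 32-bit dword.  The
        ; 0.50000 terms need no multiply at all: punpck{l,h}wd against a
        ; zeroed register places each sample in the high word of a dword,
        ; and psrld by 1 then yields sample*FIX(0.5).  PD_ONEHALF (for Y)
        ; and PD_ONEHALFM1_CJ (for Cb/Cr, which judging by its name and
        ; use also carries the CENTERJSAMPLE offset) supply the rounding
        ; bias before psrld by SCALEBITS scales the sums back down and
        ; packssdw packs the dword results into words.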
        movdqa    XMMWORD [wk(0)], xmm0 ; wk(0)=RE
        movdqa    XMMWORD [wk(1)], xmm1 ; wk(1)=RO
        movdqa    XMMWORD [wk(2)], xmm4 ; wk(2)=BE
        movdqa    XMMWORD [wk(3)], xmm5 ; wk(3)=BO

        movdqa    xmm6,xmm1
        punpcklwd xmm1,xmm3
        punpckhwd xmm6,xmm3
        movdqa    xmm7,xmm1
        movdqa    xmm4,xmm6
        pmaddwd   xmm1,[rel PW_F0299_F0337]     ; xmm1=ROL*FIX(0.299)+GOL*FIX(0.337)
        pmaddwd   xmm6,[rel PW_F0299_F0337]     ; xmm6=ROH*FIX(0.299)+GOH*FIX(0.337)
        pmaddwd   xmm7,[rel PW_MF016_MF033]     ; xmm7=ROL*-FIX(0.168)+GOL*-FIX(0.331)
        pmaddwd   xmm4,[rel PW_MF016_MF033]     ; xmm4=ROH*-FIX(0.168)+GOH*-FIX(0.331)

        movdqa    XMMWORD [wk(4)], xmm1 ; wk(4)=ROL*FIX(0.299)+GOL*FIX(0.337)
        movdqa    XMMWORD [wk(5)], xmm6 ; wk(5)=ROH*FIX(0.299)+GOH*FIX(0.337)

        pxor      xmm1,xmm1
        pxor      xmm6,xmm6
        punpcklwd xmm1,xmm5             ; xmm1=BOL
        punpckhwd xmm6,xmm5             ; xmm6=BOH
        psrld     xmm1,1                ; xmm1=BOL*FIX(0.500)
        psrld     xmm6,1                ; xmm6=BOH*FIX(0.500)

        movdqa    xmm5,[rel PD_ONEHALFM1_CJ]    ; xmm5=[PD_ONEHALFM1_CJ]

        paddd     xmm7,xmm1
        paddd     xmm4,xmm6
        paddd     xmm7,xmm5
        paddd     xmm4,xmm5
        psrld     xmm7,SCALEBITS        ; xmm7=CbOL
        psrld     xmm4,SCALEBITS        ; xmm4=CbOH
        packssdw  xmm7,xmm4             ; xmm7=CbO

        movdqa    xmm1, XMMWORD [wk(2)] ; xmm1=BE

        movdqa    xmm6,xmm0
        punpcklwd xmm0,xmm2
        punpckhwd xmm6,xmm2
        movdqa    xmm5,xmm0
        movdqa    xmm4,xmm6
        pmaddwd   xmm0,[rel PW_F0299_F0337]     ; xmm0=REL*FIX(0.299)+GEL*FIX(0.337)
        pmaddwd   xmm6,[rel PW_F0299_F0337]     ; xmm6=REH*FIX(0.299)+GEH*FIX(0.337)
        pmaddwd   xmm5,[rel PW_MF016_MF033]     ; xmm5=REL*-FIX(0.168)+GEL*-FIX(0.331)
        pmaddwd   xmm4,[rel PW_MF016_MF033]     ; xmm4=REH*-FIX(0.168)+GEH*-FIX(0.331)

        movdqa    XMMWORD [wk(6)], xmm0 ; wk(6)=REL*FIX(0.299)+GEL*FIX(0.337)
        movdqa    XMMWORD [wk(7)], xmm6 ; wk(7)=REH*FIX(0.299)+GEH*FIX(0.337)

        pxor      xmm0,xmm0
        pxor      xmm6,xmm6
        punpcklwd xmm0,xmm1             ; xmm0=BEL
        punpckhwd xmm6,xmm1             ; xmm6=BEH
        psrld     xmm0,1                ; xmm0=BEL*FIX(0.500)
        psrld     xmm6,1                ; xmm6=BEH*FIX(0.500)

        movdqa    xmm1,[rel PD_ONEHALFM1_CJ]    ; xmm1=[PD_ONEHALFM1_CJ]

        paddd     xmm5,xmm0
        paddd     xmm4,xmm6
        paddd     xmm5,xmm1
        paddd     xmm4,xmm1
        psrld     xmm5,SCALEBITS        ; xmm5=CbEL
        psrld     xmm4,SCALEBITS        ; xmm4=CbEH
        packssdw  xmm5,xmm4             ; xmm5=CbE
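        ; The even-column and odd-column results are interleaved back into
        ; pixel order before each store: shifting the odd-column words
        ; left by BYTE_BIT and OR-ing them into the even-column words
        ; leaves each 16-bit lane with an even-column byte in its low half
        ; and the following odd-column byte in its high half.  The same
        ; pattern is repeated below when Y and Cr are stored.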
        psllw     xmm7,BYTE_BIT
        por       xmm5,xmm7             ; xmm5=Cb
        movdqa    XMMWORD [rbx], xmm5   ; Save Cb

        movdqa    xmm0, XMMWORD [wk(3)] ; xmm0=BO
        movdqa    xmm6, XMMWORD [wk(2)] ; xmm6=BE
        movdqa    xmm1, XMMWORD [wk(1)] ; xmm1=RO

        movdqa    xmm4,xmm0
        punpcklwd xmm0,xmm3
        punpckhwd xmm4,xmm3
        movdqa    xmm7,xmm0
        movdqa    xmm5,xmm4
        pmaddwd   xmm0,[rel PW_F0114_F0250]     ; xmm0=BOL*FIX(0.114)+GOL*FIX(0.250)
        pmaddwd   xmm4,[rel PW_F0114_F0250]     ; xmm4=BOH*FIX(0.114)+GOH*FIX(0.250)
        pmaddwd   xmm7,[rel PW_MF008_MF041]     ; xmm7=BOL*-FIX(0.081)+GOL*-FIX(0.418)
        pmaddwd   xmm5,[rel PW_MF008_MF041]     ; xmm5=BOH*-FIX(0.081)+GOH*-FIX(0.418)

        movdqa    xmm3,[rel PD_ONEHALF]         ; xmm3=[PD_ONEHALF]

        paddd     xmm0, XMMWORD [wk(4)]
        paddd     xmm4, XMMWORD [wk(5)]
        paddd     xmm0,xmm3
        paddd     xmm4,xmm3
        psrld     xmm0,SCALEBITS        ; xmm0=YOL
        psrld     xmm4,SCALEBITS        ; xmm4=YOH
        packssdw  xmm0,xmm4             ; xmm0=YO

        pxor      xmm3,xmm3
        pxor      xmm4,xmm4
        punpcklwd xmm3,xmm1             ; xmm3=ROL
        punpckhwd xmm4,xmm1             ; xmm4=ROH
        psrld     xmm3,1                ; xmm3=ROL*FIX(0.500)
        psrld     xmm4,1                ; xmm4=ROH*FIX(0.500)

        movdqa    xmm1,[rel PD_ONEHALFM1_CJ]    ; xmm1=[PD_ONEHALFM1_CJ]

        paddd     xmm7,xmm3
        paddd     xmm5,xmm4
        paddd     xmm7,xmm1
        paddd     xmm5,xmm1
        psrld     xmm7,SCALEBITS        ; xmm7=CrOL
        psrld     xmm5,SCALEBITS        ; xmm5=CrOH
        packssdw  xmm7,xmm5             ; xmm7=CrO

        movdqa    xmm3, XMMWORD [wk(0)] ; xmm3=RE

        movdqa    xmm4,xmm6
        punpcklwd xmm6,xmm2
        punpckhwd xmm4,xmm2
        movdqa    xmm1,xmm6
        movdqa    xmm5,xmm4
        pmaddwd   xmm6,[rel PW_F0114_F0250]     ; xmm6=BEL*FIX(0.114)+GEL*FIX(0.250)
        pmaddwd   xmm4,[rel PW_F0114_F0250]     ; xmm4=BEH*FIX(0.114)+GEH*FIX(0.250)
        pmaddwd   xmm1,[rel PW_MF008_MF041]     ; xmm1=BEL*-FIX(0.081)+GEL*-FIX(0.418)
        pmaddwd   xmm5,[rel PW_MF008_MF041]     ; xmm5=BEH*-FIX(0.081)+GEH*-FIX(0.418)

        movdqa    xmm2,[rel PD_ONEHALF]         ; xmm2=[PD_ONEHALF]

        paddd     xmm6, XMMWORD [wk(6)]
        paddd     xmm4, XMMWORD [wk(7)]
        paddd     xmm6,xmm2
        paddd     xmm4,xmm2
        psrld     xmm6,SCALEBITS        ; xmm6=YEL
        psrld     xmm4,SCALEBITS        ; xmm4=YEH
        packssdw  xmm6,xmm4             ; xmm6=YE

        psllw     xmm0,BYTE_BIT
        por       xmm6,xmm0             ; xmm6=Y
        movdqa    XMMWORD [rdi], xmm6   ; Save Y

        pxor      xmm2,xmm2
        pxor      xmm4,xmm4
        punpcklwd xmm2,xmm3             ; xmm2=REL
        punpckhwd xmm4,xmm3             ; xmm4=REH
        psrld     xmm2,1                ; xmm2=REL*FIX(0.500)
        psrld     xmm4,1                ; xmm4=REH*FIX(0.500)

        movdqa    xmm0,[rel PD_ONEHALFM1_CJ]    ; xmm0=[PD_ONEHALFM1_CJ]

        paddd     xmm1,xmm2
        paddd     xmm5,xmm4
        paddd     xmm1,xmm0
        paddd     xmm5,xmm0
        psrld     xmm1,SCALEBITS        ; xmm1=CrEL
        psrld     xmm5,SCALEBITS        ; xmm5=CrEH
        packssdw  xmm1,xmm5             ; xmm1=CrE

        psllw     xmm7,BYTE_BIT
        por       xmm1,xmm7             ; xmm1=Cr
        movdqa    XMMWORD [rdx], xmm1   ; Save Cr

        sub       rcx, byte SIZEOF_XMMWORD
        add       rsi, byte RGB_PIXELSIZE*SIZEOF_XMMWORD       ; inptr
        add       rdi, byte SIZEOF_XMMWORD                     ; outptr0
        add       rbx, byte SIZEOF_XMMWORD                     ; outptr1
        add       rdx, byte SIZEOF_XMMWORD                     ; outptr2
        cmp       rcx, byte SIZEOF_XMMWORD
        jae       near .columnloop
        test      rcx,rcx
        jnz       near .column_ld1

        pop       rcx                   ; col
        pop       rsi
        pop       rdi
        pop       rbx
        pop       rdx

        add       rsi, byte SIZEOF_JSAMPROW     ; input_buf
        add       rdi, byte SIZEOF_JSAMPROW
        add       rbx, byte SIZEOF_JSAMPROW
        add       rdx, byte SIZEOF_JSAMPROW
        dec       rax                           ; num_rows
        jg        near .rowloop

.return:
        pop       rbx
        uncollect_args
        mov       rsp,rbp               ; rsp <- aligned rbp
        pop       rsp                   ; rsp <- original rbp
        pop       rbp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align     16