;
; jcqnts2f-64.asm - sample data conversion and quantization (64-bit SSE & SSE2)
;
; Copyright 2009 Pierre Ossman for Cendio AB
; Copyright 2009 D. R. Commander
;
; Based on
; x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler),
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; [TAB8]

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------
        SECTION SEG_TEXT
        BITS    64
;
; Load data into workspace, applying unsigned->signed conversion
; (JSAMPLE bytes -> FAST_FLOAT values centered on zero).
;
; GLOBAL(void)
; jsimd_convsamp_float_sse2 (JSAMPARRAY sample_data, JDIMENSION start_col,
;                            FAST_FLOAT * workspace);
;

; Arguments (placed in these registers by the collect_args macro):
; r10 = JSAMPARRAY sample_data
; r11 = JDIMENSION start_col
; r12 = FAST_FLOAT * workspace

        align   16
        global  EXTN(jsimd_convsamp_float_sse2)

EXTN(jsimd_convsamp_float_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
        collect_args
        push    rbx                     ; rbx is callee-saved; used as row pointer

        ; Build the centering constant without a memory load:
        ; all-ones words -> 0xFF80 -> packed to bytes 0x80 in every lane.
        pcmpeqw xmm7,xmm7
        psllw   xmm7,7
        packsswb xmm7,xmm7              ; xmm7 = PB_CENTERJSAMPLE (0x808080..)

        mov     rsi, r10                ; rsi = sample_data (array of row pointers)
        mov     rax, r11                ; rax = start_col
        mov     rdi, r12                ; rdi = workspace
        mov     rcx, DCTSIZE/2          ; process two 8-sample rows per iteration
.convloop:
        mov     rbx, JSAMPROW [rsi+0*SIZEOF_JSAMPROW]   ; (JSAMPLE *)
        mov     rdx, JSAMPROW [rsi+1*SIZEOF_JSAMPROW]   ; (JSAMPLE *)

        movq    xmm0, XMM_MMWORD [rbx+rax*SIZEOF_JSAMPLE]
        movq    xmm1, XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE]

        psubb   xmm0,xmm7               ; xmm0=(01234567)  bytes, now signed
        psubb   xmm1,xmm7               ; xmm1=(89ABCDEF)

        ; Widen signed bytes to dwords by duplicating each byte into the top
        ; of a dword, then arithmetic-shifting right.  The low parts that
        ; punpcklwd merges in from uninitialized xmm2/xmm3 are garbage, but
        ; psrad by (DWORD_BIT-BYTE_BIT)=24 shifts them out entirely.
        punpcklbw xmm0,xmm0             ; xmm0=(*0*1*2*3*4*5*6*7)
        punpcklbw xmm1,xmm1             ; xmm1=(*8*9*A*B*C*D*E*F)

        punpcklwd xmm2,xmm0             ; xmm2=(***0***1***2***3)
        punpckhwd xmm0,xmm0             ; xmm0=(***4***5***6***7)
        punpcklwd xmm3,xmm1             ; xmm3=(***8***9***A***B)
        punpckhwd xmm1,xmm1             ; xmm1=(***C***D***E***F)

        psrad   xmm2,(DWORD_BIT-BYTE_BIT)       ; xmm2=(0123) sign-extended dwords
        psrad   xmm0,(DWORD_BIT-BYTE_BIT)       ; xmm0=(4567)
        cvtdq2ps xmm2,xmm2              ; xmm2=(0123) as floats
        cvtdq2ps xmm0,xmm0              ; xmm0=(4567)
        psrad   xmm3,(DWORD_BIT-BYTE_BIT)       ; xmm3=(89AB)
        psrad   xmm1,(DWORD_BIT-BYTE_BIT)       ; xmm1=(CDEF)
        cvtdq2ps xmm3,xmm3              ; xmm3=(89AB)
        cvtdq2ps xmm1,xmm1              ; xmm1=(CDEF)

        ; workspace is 16-byte aligned (movaps requires this)
        movaps  XMMWORD [XMMBLOCK(0,0,rdi,SIZEOF_FAST_FLOAT)], xmm2
        movaps  XMMWORD [XMMBLOCK(0,1,rdi,SIZEOF_FAST_FLOAT)], xmm0
        movaps  XMMWORD [XMMBLOCK(1,0,rdi,SIZEOF_FAST_FLOAT)], xmm3
        movaps  XMMWORD [XMMBLOCK(1,1,rdi,SIZEOF_FAST_FLOAT)], xmm1

        add     rsi, byte 2*SIZEOF_JSAMPROW             ; advance two rows
        add     rdi, byte 2*DCTSIZE*SIZEOF_FAST_FLOAT
        dec     rcx
        jnz     short .convloop

        pop     rbx
        uncollect_args
        pop     rbp
        ret


; --------------------------------------------------------------------------
;
; Quantize/descale the coefficients, and store into coef_block
; (multiply each float coefficient by its reciprocal divisor, then
; round/convert to 16-bit JCOEF).
;
; GLOBAL(void)
; jsimd_quantize_float_sse2 (JCOEFPTR coef_block, FAST_FLOAT * divisors,
;                            FAST_FLOAT * workspace);
;

; Arguments (placed in these registers by the collect_args macro):
; r10 = JCOEFPTR coef_block
; r11 = FAST_FLOAT * divisors
; r12 = FAST_FLOAT * workspace

        align   16
        global  EXTN(jsimd_quantize_float_sse2)

EXTN(jsimd_quantize_float_sse2):
        push    rbp
        mov     rax,rsp
        mov     rbp,rsp
        collect_args

        mov     rsi, r12                ; rsi = workspace (float input)
        mov     rdx, r11                ; rdx = divisors (reciprocals)
        mov     rdi, r10                ; rdi = coef_block (JCOEF output)
        mov     rax, DCTSIZE2/16        ; 16 coefficients per iteration
.quantloop:
        movaps  xmm0, XMMWORD [XMMBLOCK(0,0,rsi,SIZEOF_FAST_FLOAT)]
        movaps  xmm1, XMMWORD [XMMBLOCK(0,1,rsi,SIZEOF_FAST_FLOAT)]
        mulps   xmm0, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)]
        mulps   xmm1, XMMWORD [XMMBLOCK(0,1,rdx,SIZEOF_FAST_FLOAT)]
        movaps  xmm2, XMMWORD [XMMBLOCK(1,0,rsi,SIZEOF_FAST_FLOAT)]
        movaps  xmm3, XMMWORD [XMMBLOCK(1,1,rsi,SIZEOF_FAST_FLOAT)]
        mulps   xmm2, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)]
        mulps   xmm3, XMMWORD [XMMBLOCK(1,1,rdx,SIZEOF_FAST_FLOAT)]

        ; float -> int32, rounding per MXCSR (default: round-to-nearest-even)
        cvtps2dq xmm0,xmm0
        cvtps2dq xmm1,xmm1
        cvtps2dq xmm2,xmm2
        cvtps2dq xmm3,xmm3

        ; int32 -> int16 with signed saturation (JCOEF is 16-bit)
        packssdw xmm0,xmm1
        packssdw xmm2,xmm3

        movdqa  XMMWORD [XMMBLOCK(0,0,rdi,SIZEOF_JCOEF)], xmm0
        movdqa  XMMWORD [XMMBLOCK(1,0,rdi,SIZEOF_JCOEF)], xmm2

        add     rsi, byte 16*SIZEOF_FAST_FLOAT
        add     rdx, byte 16*SIZEOF_FAST_FLOAT
        add     rdi, byte 16*SIZEOF_JCOEF
        dec     rax
        jnz     short .quantloop

        uncollect_args
        pop     rbp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align   16