media/libjpeg/simd/jcqnts2i-64.asm

changeset 0:6474c204b198
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/media/libjpeg/simd/jcqnts2i-64.asm	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,187 @@
+;
+; jcqnts2i-64.asm - sample data conversion and quantization (64-bit SSE2)
+;
+; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
+; Copyright 2009 D. R. Commander
+;
+; Based on the x86 SIMD extension for the IJG JPEG library
+; Copyright (C) 1999-2006, MIYASAKA Masaru.
+; For conditions of distribution and use, see the copyright notice in
+; jsimdext.inc
+;
+; This file should be assembled with NASM (Netwide Assembler); it can
+; *not* be assembled with Microsoft's MASM or any compatible assembler
+; (including Borland's Turbo Assembler).
+; NASM is available from http://nasm.sourceforge.net/ or
+; http://sourceforge.net/project/showfiles.php?group_id=6208
+;
+; [TAB8]
+
+%include "jsimdext.inc"
+%include "jdct.inc"
+
+; --------------------------------------------------------------------------
+	SECTION	SEG_TEXT
+	BITS	64
+;
+; Load data into workspace, applying unsigned->signed conversion.
+;
+; GLOBAL(void)
+; jsimd_convsamp_sse2 (JSAMPARRAY sample_data, JDIMENSION start_col,
+;                      DCTELEM * workspace);
+;
+
+; r10 = JSAMPARRAY sample_data
+; r11 = JDIMENSION start_col
+; r12 = DCTELEM * workspace
+
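+; In C terms, this routine computes roughly the following (cf. the scalar
+; convsamp() in jcdctmgr.c; this is a sketch, not the exact library code):
+;
+;   for (row = 0; row < DCTSIZE; row++)
+;     for (col = 0; col < DCTSIZE; col++)
+;       workspace[row*DCTSIZE + col] =
+;         (DCTELEM) sample_data[row][start_col + col] - CENTERJSAMPLE;
+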
+	align	16
+	global	EXTN(jsimd_convsamp_sse2)
+
+EXTN(jsimd_convsamp_sse2):
+	push	rbp
+	mov	rax,rsp
+	mov	rbp,rsp
+	collect_args
+	push	rbx
+
+	pxor	xmm6,xmm6		; xmm6=(all 0's)
+	pcmpeqw	xmm7,xmm7
+	psllw	xmm7,7			; xmm7={0xFF80 0xFF80 0xFF80 0xFF80 ..}
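+					; 0xFF80 = -128 = -CENTERJSAMPLE, so
+					; the paddw below is the level shift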
+
+	mov	rsi, r10
+	mov	rax, r11
+	mov	rdi, r12
+	mov	rcx, DCTSIZE/4
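+	; rcx = DCTSIZE/4 = 2: each iteration converts four 8-sample rows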
+.convloop:
+	mov	rbx, JSAMPROW [rsi+0*SIZEOF_JSAMPROW]	; (JSAMPLE *)
+	mov	rdx, JSAMPROW [rsi+1*SIZEOF_JSAMPROW]	; (JSAMPLE *)
+
+	movq	xmm0, XMM_MMWORD [rbx+rax*SIZEOF_JSAMPLE]	; xmm0=(01234567)
+	movq	xmm1, XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE]	; xmm1=(89ABCDEF)
+
+	mov	rbx, JSAMPROW [rsi+2*SIZEOF_JSAMPROW]	; (JSAMPLE *)
+	mov	rdx, JSAMPROW [rsi+3*SIZEOF_JSAMPROW]	; (JSAMPLE *)
+
+	movq	xmm2, XMM_MMWORD [rbx+rax*SIZEOF_JSAMPLE]	; xmm2=(GHIJKLMN)
+	movq	xmm3, XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE]	; xmm3=(OPQRSTUV)
+
+	punpcklbw xmm0,xmm6		; xmm0=(01234567)
+	punpcklbw xmm1,xmm6		; xmm1=(89ABCDEF)
+	paddw     xmm0,xmm7
+	paddw     xmm1,xmm7
+	punpcklbw xmm2,xmm6		; xmm2=(GHIJKLMN)
+	punpcklbw xmm3,xmm6		; xmm3=(OPQRSTUV)
+	paddw     xmm2,xmm7
+	paddw     xmm3,xmm7
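+					; each row is now eight signed 16-bit
+					; samples in the range -128..127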
+
+	movdqa	XMMWORD [XMMBLOCK(0,0,rdi,SIZEOF_DCTELEM)], xmm0
+	movdqa	XMMWORD [XMMBLOCK(1,0,rdi,SIZEOF_DCTELEM)], xmm1
+	movdqa	XMMWORD [XMMBLOCK(2,0,rdi,SIZEOF_DCTELEM)], xmm2
+	movdqa	XMMWORD [XMMBLOCK(3,0,rdi,SIZEOF_DCTELEM)], xmm3
+
+	add	rsi, byte 4*SIZEOF_JSAMPROW
+	add	rdi, byte 4*DCTSIZE*SIZEOF_DCTELEM
+	dec	rcx
+	jnz	short .convloop
+
+	pop	rbx
+	uncollect_args
+	pop	rbp
+	ret
+
+; --------------------------------------------------------------------------
+;
+; Quantize/descale the coefficients, and store into coef_block.
+;
+; This implementation is based on an algorithm described in
+;   "How to optimize for the Pentium family of microprocessors"
+;   (http://www.agner.org/assem/).
+;
+; GLOBAL(void)
+; jsimd_quantize_sse2 (JCOEFPTR coef_block, DCTELEM * divisors,
+;                      DCTELEM * workspace);
+;
+
+%define RECIPROCAL(m,n,b) XMMBLOCK(DCTSIZE*0+(m),(n),(b),SIZEOF_DCTELEM)
+%define CORRECTION(m,n,b) XMMBLOCK(DCTSIZE*1+(m),(n),(b),SIZEOF_DCTELEM)
+%define SCALE(m,n,b)      XMMBLOCK(DCTSIZE*2+(m),(n),(b),SIZEOF_DCTELEM)
+
+; r10 = JCOEFPTR coef_block
+; r11 = DCTELEM * divisors
+; r12 = DCTELEM * workspace
+
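+; Per coefficient, this routine computes, in effect (a sketch of the
+; reciprocal-multiplication method, not verbatim library code):
+;
+;   temp = abs(workspace[i]) + correction[i];
+;   temp = (temp * reciprocal[i]) >> 16;       /* pmulhuw */
+;   temp = (temp * scale[i]) >> 16;            /* pmulhuw */
+;   coef_block[i] = (workspace[i] < 0) ? -temp : temp;
+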
+	align	16
+	global	EXTN(jsimd_quantize_sse2)
+
+EXTN(jsimd_quantize_sse2):
+	push	rbp
+	mov	rax,rsp
+	mov	rbp,rsp
+	collect_args
+
+	mov	rsi, r12
+	mov	rdx, r11
+	mov	rdi, r10
+	mov	rax, DCTSIZE2/32
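+	; rax = DCTSIZE2/32 = 2: each iteration quantizes 32 coefficients
+	; (four XMM registers of eight words each)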
+.quantloop:
+	movdqa	xmm4, XMMWORD [XMMBLOCK(0,0,rsi,SIZEOF_DCTELEM)]
+	movdqa	xmm5, XMMWORD [XMMBLOCK(1,0,rsi,SIZEOF_DCTELEM)]
+	movdqa	xmm6, XMMWORD [XMMBLOCK(2,0,rsi,SIZEOF_DCTELEM)]
+	movdqa	xmm7, XMMWORD [XMMBLOCK(3,0,rsi,SIZEOF_DCTELEM)]
+	movdqa	xmm0,xmm4
+	movdqa	xmm1,xmm5
+	movdqa	xmm2,xmm6
+	movdqa	xmm3,xmm7
+	psraw	xmm4,(WORD_BIT-1)
+	psraw	xmm5,(WORD_BIT-1)
+	psraw	xmm6,(WORD_BIT-1)
+	psraw	xmm7,(WORD_BIT-1)
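+					; xmm4..xmm7 = per-word sign masks:
+					; 0xFFFF if negative, 0x0000 otherwise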
+	pxor	xmm0,xmm4
+	pxor	xmm1,xmm5
+	pxor	xmm2,xmm6
+	pxor	xmm3,xmm7
+	psubw	xmm0,xmm4		; if (xmm0 < 0) xmm0 = -xmm0;
+	psubw	xmm1,xmm5		; if (xmm1 < 0) xmm1 = -xmm1;
+	psubw	xmm2,xmm6		; if (xmm2 < 0) xmm2 = -xmm2;
+	psubw	xmm3,xmm7		; if (xmm3 < 0) xmm3 = -xmm3;
+
+	paddw	xmm0, XMMWORD [CORRECTION(0,0,rdx)]  ; correction + roundfactor
+	paddw	xmm1, XMMWORD [CORRECTION(1,0,rdx)]
+	paddw	xmm2, XMMWORD [CORRECTION(2,0,rdx)]
+	paddw	xmm3, XMMWORD [CORRECTION(3,0,rdx)]
+	pmulhuw	xmm0, XMMWORD [RECIPROCAL(0,0,rdx)]  ; reciprocal
+	pmulhuw	xmm1, XMMWORD [RECIPROCAL(1,0,rdx)]
+	pmulhuw	xmm2, XMMWORD [RECIPROCAL(2,0,rdx)]
+	pmulhuw	xmm3, XMMWORD [RECIPROCAL(3,0,rdx)]
+	pmulhuw	xmm0, XMMWORD [SCALE(0,0,rdx)]	; scale
+	pmulhuw	xmm1, XMMWORD [SCALE(1,0,rdx)]
+	pmulhuw	xmm2, XMMWORD [SCALE(2,0,rdx)]
+	pmulhuw	xmm3, XMMWORD [SCALE(3,0,rdx)]
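+					; pmulhuw keeps the high 16 bits of
+					; the unsigned product: (x * r) >> 16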
+
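+	; restore the original sign: (x ^ mask) - mask negates x where the
+	; mask is all 1's and leaves it unchanged where the mask is 0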
+	pxor	xmm0,xmm4
+	pxor	xmm1,xmm5
+	pxor	xmm2,xmm6
+	pxor	xmm3,xmm7
+	psubw	xmm0,xmm4
+	psubw	xmm1,xmm5
+	psubw	xmm2,xmm6
+	psubw	xmm3,xmm7
+	movdqa	XMMWORD [XMMBLOCK(0,0,rdi,SIZEOF_DCTELEM)], xmm0
+	movdqa	XMMWORD [XMMBLOCK(1,0,rdi,SIZEOF_DCTELEM)], xmm1
+	movdqa	XMMWORD [XMMBLOCK(2,0,rdi,SIZEOF_DCTELEM)], xmm2
+	movdqa	XMMWORD [XMMBLOCK(3,0,rdi,SIZEOF_DCTELEM)], xmm3
+
+	add	rsi, byte 32*SIZEOF_DCTELEM
+	add	rdx, byte 32*SIZEOF_DCTELEM
+	add	rdi, byte 32*SIZEOF_JCOEF
+	dec	rax
+	jnz	near .quantloop
+
+	uncollect_args
+	pop	rbp
+	ret
+
+; For some reason, the OS X linker does not honor the request to align the
+; segment unless we do this.
+	align	16
