; NOTE(review): reconstructed from a mangled hg-diff export (the patch adds
; b/media/libjpeg/simd/jfss2fst.asm, +404 lines); diff-viewer line-number
; residue removed, code tokens unchanged — verify against upstream.
;
; jfss2fst.asm - fast integer FDCT (SSE2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
;
; Based on
; x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler),
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; This file contains a fast, not so accurate integer implementation of
; the forward DCT (Discrete Cosine Transform). The following code is
; based directly on the IJG's original jfdctfst.c; see the jfdctfst.c
; for more details.
;
; [TAB8]

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------

%define CONST_BITS      8       ; 14 is also OK.

%if CONST_BITS == 8
; Hand-rounded fixed-point constants, scaled by 2^CONST_BITS.
F_0_382 equ      98             ; FIX(0.382683433)
F_0_541 equ     139             ; FIX(0.541196100)
F_0_707 equ     181             ; FIX(0.707106781)
F_1_306 equ     334             ; FIX(1.306562965)
%else
; NASM cannot do compile-time arithmetic on floating-point constants.
; DESCALE(x,n) rounds x to the nearest multiple of 2^n and shifts it down;
; the literals below are the constants pre-scaled by 2^30.
%define DESCALE(x,n)  (((x)+(1<<((n)-1)))>>(n))
F_0_382 equ     DESCALE( 410903207,30-CONST_BITS)       ; FIX(0.382683433)
F_0_541 equ     DESCALE( 581104887,30-CONST_BITS)       ; FIX(0.541196100)
F_0_707 equ     DESCALE( 759250124,30-CONST_BITS)       ; FIX(0.707106781)
F_1_306 equ     DESCALE(1402911301,30-CONST_BITS)       ; FIX(1.306562965)
%endif

; --------------------------------------------------------------------------
        SECTION SEG_CONST

; PRE_MULTIPLY_SCALE_BITS <= 2 (to avoid overflow)
; CONST_BITS + CONST_SHIFT + PRE_MULTIPLY_SCALE_BITS == 16 (for pmulhw)

%define PRE_MULTIPLY_SCALE_BITS  2
%define CONST_SHIFT      (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS)

        alignz  16
        global  EXTN(jconst_fdct_ifast_sse2)

EXTN(jconst_fdct_ifast_sse2):

; Multiplier tables for pmulhw: each constant replicated into 8 words and
; left-shifted so that, combined with the PRE_MULTIPLY_SCALE_BITS shift of
; the operands, the >>16 of pmulhw lands on the intended scale (see the
; CONST_SHIFT invariant above).
PW_F0707        times 8 dw  F_0_707 << CONST_SHIFT
PW_F0382        times 8 dw  F_0_382 << CONST_SHIFT
PW_F0541        times 8 dw  F_0_541 << CONST_SHIFT
PW_F1306        times 8 dw  F_1_306 << CONST_SHIFT

        alignz  16

; --------------------------------------------------------------------------
        SECTION SEG_TEXT
        BITS    32
;
; Perform the forward DCT on one block of samples.
;
; GLOBAL(void)
; jsimd_fdct_ifast_sse2 (DCTELEM * data)
;
; In-place forward DCT of one 8x8 block of 16-bit coefficients (AA&N
; fast-but-inaccurate variant; see jfdctfst.c for the algorithm).
; 32-bit cdecl: the single DCTELEM* argument is read from the stack.
; Pass 1 transposes the block while computing the row DCT entirely in
; registers; pass 2 transposes back and computes the column DCT, storing
; the result over the input.  Two aligned xmmword spill slots wk(0..1)
; are carved out of the stack below an ebp aligned to 16 bytes.
;

%define data(b)         (b)+8           ; DCTELEM * data

%define original_ebp    ebp+0
%define wk(i)           ebp-(WK_NUM-(i))*SIZEOF_XMMWORD ; xmmword wk[WK_NUM]
%define WK_NUM          2

        align   16
        global  EXTN(jsimd_fdct_ifast_sse2)

EXTN(jsimd_fdct_ifast_sse2):
        push    ebp
        mov     eax,esp                         ; eax = original ebp
        sub     esp, byte 4
        and     esp, byte (-SIZEOF_XMMWORD)     ; align to 128 bits
        mov     [esp],eax
        mov     ebp,esp                         ; ebp = aligned ebp
        lea     esp, [wk(0)]
        pushpic ebx
;       push    ecx             ; unused
;       push    edx             ; need not be preserved
;       push    esi             ; unused
;       push    edi             ; unused

        get_GOT ebx             ; get GOT address

        ; ---- Pass 1: process rows.

        mov     edx, POINTER [data(eax)]        ; (DCTELEM *)

        movdqa  xmm0, XMMWORD [XMMBLOCK(0,0,edx,SIZEOF_DCTELEM)]
        movdqa  xmm1, XMMWORD [XMMBLOCK(1,0,edx,SIZEOF_DCTELEM)]
        movdqa  xmm2, XMMWORD [XMMBLOCK(2,0,edx,SIZEOF_DCTELEM)]
        movdqa  xmm3, XMMWORD [XMMBLOCK(3,0,edx,SIZEOF_DCTELEM)]

        ; xmm0=(00 01 02 03 04 05 06 07), xmm2=(20 21 22 23 24 25 26 27)
        ; xmm1=(10 11 12 13 14 15 16 17), xmm3=(30 31 32 33 34 35 36 37)

        movdqa    xmm4,xmm0             ; transpose coefficients(phase 1)
        punpcklwd xmm0,xmm1             ; xmm0=(00 10 01 11 02 12 03 13)
        punpckhwd xmm4,xmm1             ; xmm4=(04 14 05 15 06 16 07 17)
        movdqa    xmm5,xmm2             ; transpose coefficients(phase 1)
        punpcklwd xmm2,xmm3             ; xmm2=(20 30 21 31 22 32 23 33)
        punpckhwd xmm5,xmm3             ; xmm5=(24 34 25 35 26 36 27 37)

        movdqa  xmm6, XMMWORD [XMMBLOCK(4,0,edx,SIZEOF_DCTELEM)]
        movdqa  xmm7, XMMWORD [XMMBLOCK(5,0,edx,SIZEOF_DCTELEM)]
        movdqa  xmm1, XMMWORD [XMMBLOCK(6,0,edx,SIZEOF_DCTELEM)]
        movdqa  xmm3, XMMWORD [XMMBLOCK(7,0,edx,SIZEOF_DCTELEM)]

        ; xmm6=( 4 12 20 28 36 44 52 60), xmm1=( 6 14 22 30 38 46 54 62)
        ; xmm7=( 5 13 21 29 37 45 53 61), xmm3=( 7 15 23 31 39 47 55 63)

        ; spill rows 2/3 interleave — all 8 xmm regs are in use here
        movdqa  XMMWORD [wk(0)], xmm2   ; wk(0)=(20 30 21 31 22 32 23 33)
        movdqa  XMMWORD [wk(1)], xmm5   ; wk(1)=(24 34 25 35 26 36 27 37)

        movdqa    xmm2,xmm6             ; transpose coefficients(phase 1)
        punpcklwd xmm6,xmm7             ; xmm6=(40 50 41 51 42 52 43 53)
        punpckhwd xmm2,xmm7             ; xmm2=(44 54 45 55 46 56 47 57)
        movdqa    xmm5,xmm1             ; transpose coefficients(phase 1)
        punpcklwd xmm1,xmm3             ; xmm1=(60 70 61 71 62 72 63 73)
        punpckhwd xmm5,xmm3             ; xmm5=(64 74 65 75 66 76 67 77)

        movdqa    xmm7,xmm6             ; transpose coefficients(phase 2)
        punpckldq xmm6,xmm1             ; xmm6=(40 50 60 70 41 51 61 71)
        punpckhdq xmm7,xmm1             ; xmm7=(42 52 62 72 43 53 63 73)
        movdqa    xmm3,xmm2             ; transpose coefficients(phase 2)
        punpckldq xmm2,xmm5             ; xmm2=(44 54 64 74 45 55 65 75)
        punpckhdq xmm3,xmm5             ; xmm3=(46 56 66 76 47 57 67 77)

        movdqa  xmm1, XMMWORD [wk(0)]   ; xmm1=(20 30 21 31 22 32 23 33)
        movdqa  xmm5, XMMWORD [wk(1)]   ; xmm5=(24 34 25 35 26 36 27 37)
        movdqa  XMMWORD [wk(0)], xmm7   ; wk(0)=(42 52 62 72 43 53 63 73)
        movdqa  XMMWORD [wk(1)], xmm2   ; wk(1)=(44 54 64 74 45 55 65 75)

        movdqa    xmm7,xmm0             ; transpose coefficients(phase 2)
        punpckldq xmm0,xmm1             ; xmm0=(00 10 20 30 01 11 21 31)
        punpckhdq xmm7,xmm1             ; xmm7=(02 12 22 32 03 13 23 33)
        movdqa    xmm2,xmm4             ; transpose coefficients(phase 2)
        punpckldq xmm4,xmm5             ; xmm4=(04 14 24 34 05 15 25 35)
        punpckhdq xmm2,xmm5             ; xmm2=(06 16 26 36 07 17 27 37)

        movdqa     xmm1,xmm0            ; transpose coefficients(phase 3)
        punpcklqdq xmm0,xmm6            ; xmm0=(00 10 20 30 40 50 60 70)=data0
        punpckhqdq xmm1,xmm6            ; xmm1=(01 11 21 31 41 51 61 71)=data1
        movdqa     xmm5,xmm2            ; transpose coefficients(phase 3)
        punpcklqdq xmm2,xmm3            ; xmm2=(06 16 26 36 46 56 66 76)=data6
        punpckhqdq xmm5,xmm3            ; xmm5=(07 17 27 37 47 57 67 77)=data7

        ; first butterfly stage: tmpN = dataN +/- data(7-N)
        movdqa  xmm6,xmm1
        movdqa  xmm3,xmm0
        psubw   xmm1,xmm2               ; xmm1=data1-data6=tmp6
        psubw   xmm0,xmm5               ; xmm0=data0-data7=tmp7
        paddw   xmm6,xmm2               ; xmm6=data1+data6=tmp1
        paddw   xmm3,xmm5               ; xmm3=data0+data7=tmp0

        movdqa  xmm2, XMMWORD [wk(0)]   ; xmm2=(42 52 62 72 43 53 63 73)
        movdqa  xmm5, XMMWORD [wk(1)]   ; xmm5=(44 54 64 74 45 55 65 75)
        movdqa  XMMWORD [wk(0)], xmm1   ; wk(0)=tmp6
        movdqa  XMMWORD [wk(1)], xmm0   ; wk(1)=tmp7

        movdqa     xmm1,xmm7            ; transpose coefficients(phase 3)
        punpcklqdq xmm7,xmm2            ; xmm7=(02 12 22 32 42 52 62 72)=data2
        punpckhqdq xmm1,xmm2            ; xmm1=(03 13 23 33 43 53 63 73)=data3
        movdqa     xmm0,xmm4            ; transpose coefficients(phase 3)
        punpcklqdq xmm4,xmm5            ; xmm4=(04 14 24 34 44 54 64 74)=data4
        punpckhqdq xmm0,xmm5            ; xmm0=(05 15 25 35 45 55 65 75)=data5

        movdqa  xmm2,xmm1
        movdqa  xmm5,xmm7
        paddw   xmm1,xmm4               ; xmm1=data3+data4=tmp3
        paddw   xmm7,xmm0               ; xmm7=data2+data5=tmp2
        psubw   xmm2,xmm4               ; xmm2=data3-data4=tmp4
        psubw   xmm5,xmm0               ; xmm5=data2-data5=tmp5

        ; -- Even part

        movdqa  xmm4,xmm3
        movdqa  xmm0,xmm6
        psubw   xmm3,xmm1               ; xmm3=tmp13
        psubw   xmm6,xmm7               ; xmm6=tmp12
        paddw   xmm4,xmm1               ; xmm4=tmp10
        paddw   xmm0,xmm7               ; xmm0=tmp11

        ; z1 = (tmp12+tmp13)*0.707107; pre-shift left, pmulhw takes the
        ; high 16 bits of the product (see CONST_SHIFT invariant)
        paddw   xmm6,xmm3
        psllw   xmm6,PRE_MULTIPLY_SCALE_BITS
        pmulhw  xmm6,[GOTOFF(ebx,PW_F0707)]     ; xmm6=z1

        movdqa  xmm1,xmm4
        movdqa  xmm7,xmm3
        psubw   xmm4,xmm0               ; xmm4=data4
        psubw   xmm3,xmm6               ; xmm3=data6
        paddw   xmm1,xmm0               ; xmm1=data0
        paddw   xmm7,xmm6               ; xmm7=data2

        movdqa  xmm0, XMMWORD [wk(0)]   ; xmm0=tmp6
        movdqa  xmm6, XMMWORD [wk(1)]   ; xmm6=tmp7
        movdqa  XMMWORD [wk(0)], xmm4   ; wk(0)=data4
        movdqa  XMMWORD [wk(1)], xmm3   ; wk(1)=data6

        ; -- Odd part

        paddw   xmm2,xmm5               ; xmm2=tmp10
        paddw   xmm5,xmm0               ; xmm5=tmp11
        paddw   xmm0,xmm6               ; xmm0=tmp12, xmm6=tmp7

        psllw   xmm2,PRE_MULTIPLY_SCALE_BITS
        psllw   xmm0,PRE_MULTIPLY_SCALE_BITS

        psllw   xmm5,PRE_MULTIPLY_SCALE_BITS
        pmulhw  xmm5,[GOTOFF(ebx,PW_F0707)]     ; xmm5=z3

        ; rotation: z5 = (tmp10-tmp12)*0.382683;
        ; z2 = tmp10*0.541196 + z5; z4 = tmp12*1.306563 + z5
        movdqa  xmm4,xmm2               ; xmm4=tmp10
        psubw   xmm2,xmm0
        pmulhw  xmm2,[GOTOFF(ebx,PW_F0382)]     ; xmm2=z5
        pmulhw  xmm4,[GOTOFF(ebx,PW_F0541)]     ; xmm4=MULTIPLY(tmp10,FIX_0_541196)
        pmulhw  xmm0,[GOTOFF(ebx,PW_F1306)]     ; xmm0=MULTIPLY(tmp12,FIX_1_306562)
        paddw   xmm4,xmm2               ; xmm4=z2
        paddw   xmm0,xmm2               ; xmm0=z4

        movdqa  xmm3,xmm6
        psubw   xmm6,xmm5               ; xmm6=z13
        paddw   xmm3,xmm5               ; xmm3=z11

        movdqa  xmm2,xmm6
        movdqa  xmm5,xmm3
        psubw   xmm6,xmm4               ; xmm6=data3
        psubw   xmm3,xmm0               ; xmm3=data7
        paddw   xmm2,xmm4               ; xmm2=data5
        paddw   xmm5,xmm0               ; xmm5=data1

        ; ---- Pass 2: process columns.
        ; (row-DCT results data0..data7 are still live in registers /
        ; wk slots, so the block pointer need not be reloaded)

;       mov     edx, POINTER [data(eax)]        ; (DCTELEM *)

        ; xmm1=(00 10 20 30 40 50 60 70), xmm7=(02 12 22 32 42 52 62 72)
        ; xmm5=(01 11 21 31 41 51 61 71), xmm6=(03 13 23 33 43 53 63 73)

        movdqa    xmm4,xmm1             ; transpose coefficients(phase 1)
        punpcklwd xmm1,xmm5             ; xmm1=(00 01 10 11 20 21 30 31)
        punpckhwd xmm4,xmm5             ; xmm4=(40 41 50 51 60 61 70 71)
        movdqa    xmm0,xmm7             ; transpose coefficients(phase 1)
        punpcklwd xmm7,xmm6             ; xmm7=(02 03 12 13 22 23 32 33)
        punpckhwd xmm0,xmm6             ; xmm0=(42 43 52 53 62 63 72 73)

        movdqa  xmm5, XMMWORD [wk(0)]   ; xmm5=col4
        movdqa  xmm6, XMMWORD [wk(1)]   ; xmm6=col6

        ; xmm5=(04 14 24 34 44 54 64 74), xmm6=(06 16 26 36 46 56 66 76)
        ; xmm2=(05 15 25 35 45 55 65 75), xmm3=(07 17 27 37 47 57 67 77)

        movdqa  XMMWORD [wk(0)], xmm7   ; wk(0)=(02 03 12 13 22 23 32 33)
        movdqa  XMMWORD [wk(1)], xmm0   ; wk(1)=(42 43 52 53 62 63 72 73)

        movdqa    xmm7,xmm5             ; transpose coefficients(phase 1)
        punpcklwd xmm5,xmm2             ; xmm5=(04 05 14 15 24 25 34 35)
        punpckhwd xmm7,xmm2             ; xmm7=(44 45 54 55 64 65 74 75)
        movdqa    xmm0,xmm6             ; transpose coefficients(phase 1)
        punpcklwd xmm6,xmm3             ; xmm6=(06 07 16 17 26 27 36 37)
        punpckhwd xmm0,xmm3             ; xmm0=(46 47 56 57 66 67 76 77)

        movdqa    xmm2,xmm5             ; transpose coefficients(phase 2)
        punpckldq xmm5,xmm6             ; xmm5=(04 05 06 07 14 15 16 17)
        punpckhdq xmm2,xmm6             ; xmm2=(24 25 26 27 34 35 36 37)
        movdqa    xmm3,xmm7             ; transpose coefficients(phase 2)
        punpckldq xmm7,xmm0             ; xmm7=(44 45 46 47 54 55 56 57)
        punpckhdq xmm3,xmm0             ; xmm3=(64 65 66 67 74 75 76 77)

        movdqa  xmm6, XMMWORD [wk(0)]   ; xmm6=(02 03 12 13 22 23 32 33)
        movdqa  xmm0, XMMWORD [wk(1)]   ; xmm0=(42 43 52 53 62 63 72 73)
        movdqa  XMMWORD [wk(0)], xmm2   ; wk(0)=(24 25 26 27 34 35 36 37)
        movdqa  XMMWORD [wk(1)], xmm7   ; wk(1)=(44 45 46 47 54 55 56 57)

        movdqa    xmm2,xmm1             ; transpose coefficients(phase 2)
        punpckldq xmm1,xmm6             ; xmm1=(00 01 02 03 10 11 12 13)
        punpckhdq xmm2,xmm6             ; xmm2=(20 21 22 23 30 31 32 33)
        movdqa    xmm7,xmm4             ; transpose coefficients(phase 2)
        punpckldq xmm4,xmm0             ; xmm4=(40 41 42 43 50 51 52 53)
        punpckhdq xmm7,xmm0             ; xmm7=(60 61 62 63 70 71 72 73)

        movdqa     xmm6,xmm1            ; transpose coefficients(phase 3)
        punpcklqdq xmm1,xmm5            ; xmm1=(00 01 02 03 04 05 06 07)=data0
        punpckhqdq xmm6,xmm5            ; xmm6=(10 11 12 13 14 15 16 17)=data1
        movdqa     xmm0,xmm7            ; transpose coefficients(phase 3)
        punpcklqdq xmm7,xmm3            ; xmm7=(60 61 62 63 64 65 66 67)=data6
        punpckhqdq xmm0,xmm3            ; xmm0=(70 71 72 73 74 75 76 77)=data7

        ; first butterfly stage (column pass)
        movdqa  xmm5,xmm6
        movdqa  xmm3,xmm1
        psubw   xmm6,xmm7               ; xmm6=data1-data6=tmp6
        psubw   xmm1,xmm0               ; xmm1=data0-data7=tmp7
        paddw   xmm5,xmm7               ; xmm5=data1+data6=tmp1
        paddw   xmm3,xmm0               ; xmm3=data0+data7=tmp0

        movdqa  xmm7, XMMWORD [wk(0)]   ; xmm7=(24 25 26 27 34 35 36 37)
        movdqa  xmm0, XMMWORD [wk(1)]   ; xmm0=(44 45 46 47 54 55 56 57)
        movdqa  XMMWORD [wk(0)], xmm6   ; wk(0)=tmp6
        movdqa  XMMWORD [wk(1)], xmm1   ; wk(1)=tmp7

        movdqa     xmm6,xmm2            ; transpose coefficients(phase 3)
        punpcklqdq xmm2,xmm7            ; xmm2=(20 21 22 23 24 25 26 27)=data2
        punpckhqdq xmm6,xmm7            ; xmm6=(30 31 32 33 34 35 36 37)=data3
        movdqa     xmm1,xmm4            ; transpose coefficients(phase 3)
        punpcklqdq xmm4,xmm0            ; xmm4=(40 41 42 43 44 45 46 47)=data4
        punpckhqdq xmm1,xmm0            ; xmm1=(50 51 52 53 54 55 56 57)=data5

        movdqa  xmm7,xmm6
        movdqa  xmm0,xmm2
        paddw   xmm6,xmm4               ; xmm6=data3+data4=tmp3
        paddw   xmm2,xmm1               ; xmm2=data2+data5=tmp2
        psubw   xmm7,xmm4               ; xmm7=data3-data4=tmp4
        psubw   xmm0,xmm1               ; xmm0=data2-data5=tmp5

        ; -- Even part

        movdqa  xmm4,xmm3
        movdqa  xmm1,xmm5
        psubw   xmm3,xmm6               ; xmm3=tmp13
        psubw   xmm5,xmm2               ; xmm5=tmp12
        paddw   xmm4,xmm6               ; xmm4=tmp10
        paddw   xmm1,xmm2               ; xmm1=tmp11

        paddw   xmm5,xmm3
        psllw   xmm5,PRE_MULTIPLY_SCALE_BITS
        pmulhw  xmm5,[GOTOFF(ebx,PW_F0707)]     ; xmm5=z1

        movdqa  xmm6,xmm4
        movdqa  xmm2,xmm3
        psubw   xmm4,xmm1               ; xmm4=data4
        psubw   xmm3,xmm5               ; xmm3=data6
        paddw   xmm6,xmm1               ; xmm6=data0
        paddw   xmm2,xmm5               ; xmm2=data2

        ; store even-numbered output rows in place
        movdqa  XMMWORD [XMMBLOCK(4,0,edx,SIZEOF_DCTELEM)], xmm4
        movdqa  XMMWORD [XMMBLOCK(6,0,edx,SIZEOF_DCTELEM)], xmm3
        movdqa  XMMWORD [XMMBLOCK(0,0,edx,SIZEOF_DCTELEM)], xmm6
        movdqa  XMMWORD [XMMBLOCK(2,0,edx,SIZEOF_DCTELEM)], xmm2

        ; -- Odd part

        movdqa  xmm1, XMMWORD [wk(0)]   ; xmm1=tmp6
        movdqa  xmm5, XMMWORD [wk(1)]   ; xmm5=tmp7

        paddw   xmm7,xmm0               ; xmm7=tmp10
        paddw   xmm0,xmm1               ; xmm0=tmp11
        paddw   xmm1,xmm5               ; xmm1=tmp12, xmm5=tmp7

        psllw   xmm7,PRE_MULTIPLY_SCALE_BITS
        psllw   xmm1,PRE_MULTIPLY_SCALE_BITS

        psllw   xmm0,PRE_MULTIPLY_SCALE_BITS
        pmulhw  xmm0,[GOTOFF(ebx,PW_F0707)]     ; xmm0=z3

        movdqa  xmm4,xmm7               ; xmm4=tmp10
        psubw   xmm7,xmm1
        pmulhw  xmm7,[GOTOFF(ebx,PW_F0382)]     ; xmm7=z5
        pmulhw  xmm4,[GOTOFF(ebx,PW_F0541)]     ; xmm4=MULTIPLY(tmp10,FIX_0_541196)
        pmulhw  xmm1,[GOTOFF(ebx,PW_F1306)]     ; xmm1=MULTIPLY(tmp12,FIX_1_306562)
        paddw   xmm4,xmm7               ; xmm4=z2
        paddw   xmm1,xmm7               ; xmm1=z4

        movdqa  xmm3,xmm5
        psubw   xmm5,xmm0               ; xmm5=z13
        paddw   xmm3,xmm0               ; xmm3=z11

        movdqa  xmm6,xmm5
        movdqa  xmm2,xmm3
        psubw   xmm5,xmm4               ; xmm5=data3
        psubw   xmm3,xmm1               ; xmm3=data7
        paddw   xmm6,xmm4               ; xmm6=data5
        paddw   xmm2,xmm1               ; xmm2=data1

        ; store odd-numbered output rows in place
        movdqa  XMMWORD [XMMBLOCK(3,0,edx,SIZEOF_DCTELEM)], xmm5
        movdqa  XMMWORD [XMMBLOCK(7,0,edx,SIZEOF_DCTELEM)], xmm3
        movdqa  XMMWORD [XMMBLOCK(5,0,edx,SIZEOF_DCTELEM)], xmm6
        movdqa  XMMWORD [XMMBLOCK(1,0,edx,SIZEOF_DCTELEM)], xmm2

;       pop     edi             ; unused
;       pop     esi             ; unused
;       pop     edx             ; need not be preserved
;       pop     ecx             ; unused
        poppic  ebx
        mov     esp,ebp         ; esp <- aligned ebp
        pop     esp             ; esp <- original ebp
        pop     ebp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align   16