;
; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
; Use of this source code is governed by a BSD-style license
; that can be found in the LICENSE file in the root of the source
; tree. An additional intellectual property rights grant can be found
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;


%include "vpx_ports/x86_abi_support.asm"

;void vp8_idct_dequant_0_2x_sse2
; (
;   short *qcoeff       - 0
;   short *dequant      - 1
;   unsigned char *dst  - 2
;   int dst_stride      - 3
; )

global sym(vp8_idct_dequant_0_2x_sse2) PRIVATE
sym(vp8_idct_dequant_0_2x_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 4
    GET_GOT     rbx
    ; end prolog

    mov         rdx, arg(1)                 ; dequant
    mov         rax, arg(0)                 ; qcoeff

    movd        xmm4, [rax]
    movd        xmm5, [rdx]

    pinsrw      xmm4, [rax+32], 4
    pinsrw      xmm5, [rdx], 4

    pmullw      xmm4, xmm5

    ; Zero out xmm5, for use unpacking
    pxor        xmm5, xmm5

    ; clear coeffs
    movd        [rax], xmm5
    movd        [rax+32], xmm5
    ;pshufb
    mov         rax, arg(2)                 ; dst
    movsxd      rdx, dword ptr arg(3)       ; dst_stride

    pshuflw     xmm4, xmm4, 00000000b
    pshufhw     xmm4, xmm4, 00000000b

    lea         rcx, [rdx + rdx*2]
    paddw       xmm4, [GLOBAL(fours)]

    psraw       xmm4, 3

    movq        xmm0, [rax]
    movq        xmm1, [rax+rdx]
    movq        xmm2, [rax+2*rdx]
    movq        xmm3, [rax+rcx]

    punpcklbw   xmm0, xmm5
    punpcklbw   xmm1, xmm5
    punpcklbw   xmm2, xmm5
    punpcklbw   xmm3, xmm5


    ; Add to predict buffer
    paddw       xmm0, xmm4
    paddw       xmm1, xmm4
    paddw       xmm2, xmm4
    paddw       xmm3, xmm4

    ; pack up before storing
    packuswb    xmm0, xmm5
    packuswb    xmm1, xmm5
    packuswb    xmm2, xmm5
    packuswb    xmm3, xmm5

    ; store blocks back out
    movq        [rax], xmm0
    movq        [rax + rdx], xmm1

    lea         rax, [rax + 2*rdx]

    movq        [rax], xmm2
    movq        [rax + rdx], xmm3

    ; begin epilog
    RESTORE_GOT
    UNSHADOW_ARGS
    pop         rbp
    ret

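; A rough C-level sketch of the routine above (illustration only, not part of
; the build).  Each of the two horizontally adjacent 4x4 blocks is assumed to
; carry at most its DC coefficient, so the IDCT collapses to one rounded
; constant per block that is added to the predictor; clamp255() below is just
; shorthand for the saturation that packuswb performs, and r/c are loop
; indices introduced for the sketch.
;
;   int dc0 = (qcoeff[0]  * dequant[0] + 4) >> 3;
;   int dc1 = (qcoeff[16] * dequant[0] + 4) >> 3;
;   qcoeff[0]  = qcoeff[1]  = 0;   /* movd stores a dword, clearing two words */
;   qcoeff[16] = qcoeff[17] = 0;
;   for (r = 0; r < 4; r++)
;       for (c = 0; c < 4; c++) {
;           dst[r * dst_stride + c]     = clamp255(dst[r * dst_stride + c]     + dc0);
;           dst[r * dst_stride + 4 + c] = clamp255(dst[r * dst_stride + 4 + c] + dc1);
;       }
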
;void vp8_idct_dequant_full_2x_sse2
; (
;   short *qcoeff       - 0
;   short *dequant      - 1
;   unsigned char *dst  - 2
;   int dst_stride      - 3
; )
global sym(vp8_idct_dequant_full_2x_sse2) PRIVATE
sym(vp8_idct_dequant_full_2x_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 4
    SAVE_XMM 7
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    ; special case when 2 blocks have 0 or 1 coeffs
    ; dc is set as first coeff, so no need to load qcoeff
    mov         rax, arg(0)                 ; qcoeff
    mov         rdx, arg(1)                 ; dequant
    mov         rdi, arg(2)                 ; dst


    ; Zero out xmm7, for use unpacking
    pxor        xmm7, xmm7


    ; note the transpose of xmm1 and xmm2, necessary for shuffle
    ; to spit out sensible data
    movdqa      xmm0, [rax]
    movdqa      xmm2, [rax+16]
    movdqa      xmm1, [rax+32]
    movdqa      xmm3, [rax+48]

    ; Clear out coeffs
    movdqa      [rax], xmm7
    movdqa      [rax+16], xmm7
    movdqa      [rax+32], xmm7
    movdqa      [rax+48], xmm7

    ; dequantize qcoeff buffer
    pmullw      xmm0, [rdx]
    pmullw      xmm2, [rdx+16]
    pmullw      xmm1, [rdx]
    pmullw      xmm3, [rdx+16]
    movsxd      rdx, dword ptr arg(3)       ; dst_stride

    ; repack so block 0 row x and block 1 row x are together
    movdqa      xmm4, xmm0
    punpckldq   xmm0, xmm1
    punpckhdq   xmm4, xmm1

    pshufd      xmm0, xmm0, 11011000b
    pshufd      xmm1, xmm4, 11011000b

    movdqa      xmm4, xmm2
    punpckldq   xmm2, xmm3
    punpckhdq   xmm4, xmm3

    pshufd      xmm2, xmm2, 11011000b
    pshufd      xmm3, xmm4, 11011000b

    ; first pass
    psubw       xmm0, xmm2                  ; b1 = 0-2
    paddw       xmm2, xmm2                  ;

    movdqa      xmm5, xmm1
    paddw       xmm2, xmm0                  ; a1 = 0+2

    pmulhw      xmm5, [GLOBAL(x_s1sqr2)]
    lea         rcx, [rdx + rdx*2]          ; dst_stride * 3
    paddw       xmm5, xmm1                  ; ip1 * sin(pi/8) * sqrt(2)

    movdqa      xmm7, xmm3
    pmulhw      xmm7, [GLOBAL(x_c1sqr2less1)]

    paddw       xmm7, xmm3                  ; ip3 * cos(pi/8) * sqrt(2)
    psubw       xmm7, xmm5                  ; c1

    movdqa      xmm5, xmm1
    movdqa      xmm4, xmm3

    pmulhw      xmm5, [GLOBAL(x_c1sqr2less1)]
    paddw       xmm5, xmm1

    pmulhw      xmm3, [GLOBAL(x_s1sqr2)]
    paddw       xmm3, xmm4

    paddw       xmm3, xmm5                  ; d1
    movdqa      xmm6, xmm2                  ; a1

    movdqa      xmm4, xmm0                  ; b1
    paddw       xmm2, xmm3                  ; 0

    paddw       xmm4, xmm7                  ; 1
    psubw       xmm0, xmm7                  ; 2

    psubw       xmm6, xmm3                  ; 3

    ; transpose for the second pass
    movdqa      xmm7, xmm2                  ; 103 102 101 100 003 002 001 000
    punpcklwd   xmm2, xmm0                  ; 007 003 006 002 005 001 004 000
    punpckhwd   xmm7, xmm0                  ; 107 103 106 102 105 101 104 100

    movdqa      xmm5, xmm4                  ; 111 110 109 108 011 010 009 008
    punpcklwd   xmm4, xmm6                  ; 015 011 014 010 013 009 012 008
    punpckhwd   xmm5, xmm6                  ; 115 111 114 110 113 109 112 108


    movdqa      xmm1, xmm2                  ; 007 003 006 002 005 001 004 000
    punpckldq   xmm2, xmm4                  ; 013 009 005 001 012 008 004 000
    punpckhdq   xmm1, xmm4                  ; 015 011 007 003 014 010 006 002

    movdqa      xmm6, xmm7                  ; 107 103 106 102 105 101 104 100
    punpckldq   xmm7, xmm5                  ; 113 109 105 101 112 108 104 100
    punpckhdq   xmm6, xmm5                  ; 115 111 107 103 114 110 106 102


    movdqa      xmm5, xmm2                  ; 013 009 005 001 012 008 004 000
    punpckldq   xmm2, xmm7                  ; 112 108 012 008 104 100 004 000
    punpckhdq   xmm5, xmm7                  ; 113 109 013 009 105 101 005 001

    movdqa      xmm7, xmm1                  ; 015 011 007 003 014 010 006 002
    punpckldq   xmm1, xmm6                  ; 114 110 014 010 106 102 006 002
    punpckhdq   xmm7, xmm6                  ; 115 111 015 011 107 103 007 003

    pshufd      xmm0, xmm2, 11011000b
    pshufd      xmm2, xmm1, 11011000b

    pshufd      xmm1, xmm5, 11011000b
    pshufd      xmm3, xmm7, 11011000b

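    ; Both IDCT passes (the one above and the one below) apply the same 1-D
    ; VP8 butterfly to eight 16-bit lanes at once, i.e. to the two blocks side
    ; by side.  Roughly, in scalar terms, with s = sin(pi/8)*sqrt(2) and
    ; c = cos(pi/8)*sqrt(2) as implemented here:
    ;
    ;   a1 = ip0 + ip2                  b1 = ip0 - ip2
    ;   c1 = ip3*c - ip1*s              d1 = ip1*c + ip3*s
    ;   out0 = a1 + d1   out1 = b1 + c1   out2 = b1 - c1   out3 = a1 - d1
    ;
    ; The row order of the outputs is absorbed by the surrounding transposes.
    ; In the transpose comments, 0xx / 1xx label element xx of block 0 /
    ; block 1.
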
    ; second pass
    psubw       xmm0, xmm2                  ; b1 = 0-2
    paddw       xmm2, xmm2

    movdqa      xmm5, xmm1
    paddw       xmm2, xmm0                  ; a1 = 0+2

    pmulhw      xmm5, [GLOBAL(x_s1sqr2)]
    paddw       xmm5, xmm1                  ; ip1 * sin(pi/8) * sqrt(2)

    movdqa      xmm7, xmm3
    pmulhw      xmm7, [GLOBAL(x_c1sqr2less1)]

    paddw       xmm7, xmm3                  ; ip3 * cos(pi/8) * sqrt(2)
    psubw       xmm7, xmm5                  ; c1

    movdqa      xmm5, xmm1
    movdqa      xmm4, xmm3

    pmulhw      xmm5, [GLOBAL(x_c1sqr2less1)]
    paddw       xmm5, xmm1

    pmulhw      xmm3, [GLOBAL(x_s1sqr2)]
    paddw       xmm3, xmm4

    paddw       xmm3, xmm5                  ; d1
    paddw       xmm0, [GLOBAL(fours)]

    paddw       xmm2, [GLOBAL(fours)]
    movdqa      xmm6, xmm2                  ; a1

    movdqa      xmm4, xmm0                  ; b1
    paddw       xmm2, xmm3                  ; 0

    paddw       xmm4, xmm7                  ; 1
    psubw       xmm0, xmm7                  ; 2

    psubw       xmm6, xmm3                  ; 3
    psraw       xmm2, 3

    psraw       xmm0, 3
    psraw       xmm4, 3

    psraw       xmm6, 3

    ; transpose to save
    movdqa      xmm7, xmm2                  ; 103 102 101 100 003 002 001 000
    punpcklwd   xmm2, xmm0                  ; 007 003 006 002 005 001 004 000
    punpckhwd   xmm7, xmm0                  ; 107 103 106 102 105 101 104 100

    movdqa      xmm5, xmm4                  ; 111 110 109 108 011 010 009 008
    punpcklwd   xmm4, xmm6                  ; 015 011 014 010 013 009 012 008
    punpckhwd   xmm5, xmm6                  ; 115 111 114 110 113 109 112 108


    movdqa      xmm1, xmm2                  ; 007 003 006 002 005 001 004 000
    punpckldq   xmm2, xmm4                  ; 013 009 005 001 012 008 004 000
    punpckhdq   xmm1, xmm4                  ; 015 011 007 003 014 010 006 002

    movdqa      xmm6, xmm7                  ; 107 103 106 102 105 101 104 100
    punpckldq   xmm7, xmm5                  ; 113 109 105 101 112 108 104 100
    punpckhdq   xmm6, xmm5                  ; 115 111 107 103 114 110 106 102


    movdqa      xmm5, xmm2                  ; 013 009 005 001 012 008 004 000
    punpckldq   xmm2, xmm7                  ; 112 108 012 008 104 100 004 000
    punpckhdq   xmm5, xmm7                  ; 113 109 013 009 105 101 005 001

    movdqa      xmm7, xmm1                  ; 015 011 007 003 014 010 006 002
    punpckldq   xmm1, xmm6                  ; 114 110 014 010 106 102 006 002
    punpckhdq   xmm7, xmm6                  ; 115 111 015 011 107 103 007 003

    pshufd      xmm0, xmm2, 11011000b
    pshufd      xmm2, xmm1, 11011000b

    pshufd      xmm1, xmm5, 11011000b
    pshufd      xmm3, xmm7, 11011000b

    pxor        xmm7, xmm7

    ; Load up predict blocks
    movq        xmm4, [rdi]
    movq        xmm5, [rdi+rdx]

    punpcklbw   xmm4, xmm7
    punpcklbw   xmm5, xmm7

    paddw       xmm0, xmm4
    paddw       xmm1, xmm5

    movq        xmm4, [rdi+2*rdx]
    movq        xmm5, [rdi+rcx]

    punpcklbw   xmm4, xmm7
    punpcklbw   xmm5, xmm7

    paddw       xmm2, xmm4
    paddw       xmm3, xmm5

.finish:

    ; pack up before storing
    packuswb    xmm0, xmm7
    packuswb    xmm1, xmm7
    packuswb    xmm2, xmm7
    packuswb    xmm3, xmm7

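    ; packuswb saturates each 16-bit sum to the 0..255 range, which doubles as
    ; the clamp required when the reconstructed residual is added back to the
    ; 8-bit predictor.
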
    ; store blocks back out
    movq        [rdi], xmm0
    movq        [rdi + rdx], xmm1
    movq        [rdi + rdx*2], xmm2
    movq        [rdi + rcx], xmm3

    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret

;void vp8_idct_dequant_dc_0_2x_sse2
; (
;   short *qcoeff       - 0
;   short *dequant      - 1
;   unsigned char *dst  - 2
;   int dst_stride      - 3
;   short *dc           - 4
; )
global sym(vp8_idct_dequant_dc_0_2x_sse2) PRIVATE
sym(vp8_idct_dequant_dc_0_2x_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 5
    GET_GOT     rbx
    push        rdi
    ; end prolog

    ; special case when 2 blocks have 0 or 1 coeffs
    ; dc is set as first coeff, so no need to load qcoeff
    mov         rax, arg(0)                 ; qcoeff

    mov         rdi, arg(2)                 ; dst
    mov         rdx, arg(4)                 ; dc

    ; Zero out xmm5, for use unpacking
    pxor        xmm5, xmm5

    ; load up 2 dc words here == 2*16 = doubleword
    movd        xmm4, [rdx]

    movsxd      rdx, dword ptr arg(3)       ; dst_stride
    lea         rcx, [rdx + rdx*2]
    ; Load up predict blocks
    movq        xmm0, [rdi]
    movq        xmm1, [rdi+rdx*1]
    movq        xmm2, [rdi+rdx*2]
    movq        xmm3, [rdi+rcx]

    ; Duplicate and expand dc across
    punpcklwd   xmm4, xmm4
    punpckldq   xmm4, xmm4

    ; Rounding to dequant and downshift
    paddw       xmm4, [GLOBAL(fours)]
    psraw       xmm4, 3

    ; Predict buffer needs to be expanded from bytes to words
    punpcklbw   xmm0, xmm5
    punpcklbw   xmm1, xmm5
    punpcklbw   xmm2, xmm5
    punpcklbw   xmm3, xmm5

    ; Add to predict buffer
    paddw       xmm0, xmm4
    paddw       xmm1, xmm4
    paddw       xmm2, xmm4
    paddw       xmm3, xmm4

    ; pack up before storing
    packuswb    xmm0, xmm5
    packuswb    xmm1, xmm5
    packuswb    xmm2, xmm5
    packuswb    xmm3, xmm5

    ; store blocks back out
    movq        [rdi], xmm0
    movq        [rdi + rdx], xmm1
    movq        [rdi + rdx*2], xmm2
    movq        [rdi + rcx], xmm3

    ; begin epilog
    pop         rdi
    RESTORE_GOT
    UNSHADOW_ARGS
    pop         rbp
    ret

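; A rough C-level sketch of the routine above (illustration only, not part of
; the build).  dc[0] and dc[1] are taken as the already-dequantized DC terms of
; the two blocks (the dequant argument is not read here), so only the rounding
; shift and the add to the predictor remain; clamp255() and r/c are again just
; shorthand for the sketch.
;
;   int dc0 = (dc[0] + 4) >> 3;
;   int dc1 = (dc[1] + 4) >> 3;
;   for (r = 0; r < 4; r++)
;       for (c = 0; c < 4; c++) {
;           dst[r * dst_stride + c]     = clamp255(dst[r * dst_stride + c]     + dc0);
;           dst[r * dst_stride + 4 + c] = clamp255(dst[r * dst_stride + 4 + c] + dc1);
;       }
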
;void vp8_idct_dequant_dc_full_2x_sse2
; (
;   short *qcoeff       - 0
;   short *dequant      - 1
;   unsigned char *dst  - 2
;   int dst_stride      - 3
;   short *dc           - 4
; )
global sym(vp8_idct_dequant_dc_full_2x_sse2) PRIVATE
sym(vp8_idct_dequant_dc_full_2x_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 5
    SAVE_XMM 7
    GET_GOT     rbx
    push        rdi
    ; end prolog

    ; special case when 2 blocks have 0 or 1 coeffs
    ; dc is set as first coeff, so no need to load qcoeff
    mov         rax, arg(0)                 ; qcoeff
    mov         rdx, arg(1)                 ; dequant

    mov         rdi, arg(2)                 ; dst

    ; Zero out xmm7, for use unpacking
    pxor        xmm7, xmm7


    ; note the transpose of xmm1 and xmm2, necessary for shuffle
    ; to spit out sensible data
    movdqa      xmm0, [rax]
    movdqa      xmm2, [rax+16]
    movdqa      xmm1, [rax+32]
    movdqa      xmm3, [rax+48]

    ; Clear out coeffs
    movdqa      [rax], xmm7
    movdqa      [rax+16], xmm7
    movdqa      [rax+32], xmm7
    movdqa      [rax+48], xmm7

    ; dequantize qcoeff buffer
    pmullw      xmm0, [rdx]
    pmullw      xmm2, [rdx+16]
    pmullw      xmm1, [rdx]
    pmullw      xmm3, [rdx+16]

    ; DC component
    mov         rdx, arg(4)

    ; repack so block 0 row x and block 1 row x are together
    movdqa      xmm4, xmm0
    punpckldq   xmm0, xmm1
    punpckhdq   xmm4, xmm1

    pshufd      xmm0, xmm0, 11011000b
    pshufd      xmm1, xmm4, 11011000b

    movdqa      xmm4, xmm2
    punpckldq   xmm2, xmm3
    punpckhdq   xmm4, xmm3

    pshufd      xmm2, xmm2, 11011000b
    pshufd      xmm3, xmm4, 11011000b

    ; insert DC component
    pinsrw      xmm0, [rdx], 0
    pinsrw      xmm0, [rdx+2], 4

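    ; After the repack above, the low quadword of xmm0 holds row 0 of block 0
    ; and the high quadword holds row 0 of block 1, so lanes 0 and 4 are the
    ; two DC positions.  The pinsrw pair overwrites them with dc[0] and dc[1],
    ; the DC terms supplied by the caller (typically the already-dequantized
    ; output of the second-order transform).
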
    ; first pass
    psubw       xmm0, xmm2                  ; b1 = 0-2
    paddw       xmm2, xmm2                  ;

    movdqa      xmm5, xmm1
    paddw       xmm2, xmm0                  ; a1 = 0+2

    pmulhw      xmm5, [GLOBAL(x_s1sqr2)]
    paddw       xmm5, xmm1                  ; ip1 * sin(pi/8) * sqrt(2)

    movdqa      xmm7, xmm3
    pmulhw      xmm7, [GLOBAL(x_c1sqr2less1)]

    paddw       xmm7, xmm3                  ; ip3 * cos(pi/8) * sqrt(2)
    psubw       xmm7, xmm5                  ; c1

    movdqa      xmm5, xmm1
    movdqa      xmm4, xmm3

    pmulhw      xmm5, [GLOBAL(x_c1sqr2less1)]
    paddw       xmm5, xmm1

    pmulhw      xmm3, [GLOBAL(x_s1sqr2)]
    paddw       xmm3, xmm4

    paddw       xmm3, xmm5                  ; d1
    movdqa      xmm6, xmm2                  ; a1

    movdqa      xmm4, xmm0                  ; b1
    paddw       xmm2, xmm3                  ; 0

    paddw       xmm4, xmm7                  ; 1
    psubw       xmm0, xmm7                  ; 2

    psubw       xmm6, xmm3                  ; 3

    ; transpose for the second pass
    movdqa      xmm7, xmm2                  ; 103 102 101 100 003 002 001 000
    punpcklwd   xmm2, xmm0                  ; 007 003 006 002 005 001 004 000
    punpckhwd   xmm7, xmm0                  ; 107 103 106 102 105 101 104 100

    movdqa      xmm5, xmm4                  ; 111 110 109 108 011 010 009 008
    punpcklwd   xmm4, xmm6                  ; 015 011 014 010 013 009 012 008
    punpckhwd   xmm5, xmm6                  ; 115 111 114 110 113 109 112 108


    movdqa      xmm1, xmm2                  ; 007 003 006 002 005 001 004 000
    punpckldq   xmm2, xmm4                  ; 013 009 005 001 012 008 004 000
    punpckhdq   xmm1, xmm4                  ; 015 011 007 003 014 010 006 002

    movdqa      xmm6, xmm7                  ; 107 103 106 102 105 101 104 100
    punpckldq   xmm7, xmm5                  ; 113 109 105 101 112 108 104 100
    punpckhdq   xmm6, xmm5                  ; 115 111 107 103 114 110 106 102


    movdqa      xmm5, xmm2                  ; 013 009 005 001 012 008 004 000
    punpckldq   xmm2, xmm7                  ; 112 108 012 008 104 100 004 000
    punpckhdq   xmm5, xmm7                  ; 113 109 013 009 105 101 005 001

    movdqa      xmm7, xmm1                  ; 015 011 007 003 014 010 006 002
    punpckldq   xmm1, xmm6                  ; 114 110 014 010 106 102 006 002
    punpckhdq   xmm7, xmm6                  ; 115 111 015 011 107 103 007 003

    pshufd      xmm0, xmm2, 11011000b
    pshufd      xmm2, xmm1, 11011000b

    pshufd      xmm1, xmm5, 11011000b
    pshufd      xmm3, xmm7, 11011000b

    ; second pass
    psubw       xmm0, xmm2                  ; b1 = 0-2
    paddw       xmm2, xmm2

    movdqa      xmm5, xmm1
    paddw       xmm2, xmm0                  ; a1 = 0+2

    pmulhw      xmm5, [GLOBAL(x_s1sqr2)]
    paddw       xmm5, xmm1                  ; ip1 * sin(pi/8) * sqrt(2)

    movdqa      xmm7, xmm3
    pmulhw      xmm7, [GLOBAL(x_c1sqr2less1)]

    paddw       xmm7, xmm3                  ; ip3 * cos(pi/8) * sqrt(2)
    psubw       xmm7, xmm5                  ; c1

    movdqa      xmm5, xmm1
    movdqa      xmm4, xmm3

    pmulhw      xmm5, [GLOBAL(x_c1sqr2less1)]
    paddw       xmm5, xmm1

    pmulhw      xmm3, [GLOBAL(x_s1sqr2)]
    paddw       xmm3, xmm4

    paddw       xmm3, xmm5                  ; d1
    paddw       xmm0, [GLOBAL(fours)]

    paddw       xmm2, [GLOBAL(fours)]
    movdqa      xmm6, xmm2                  ; a1

    movdqa      xmm4, xmm0                  ; b1
    paddw       xmm2, xmm3                  ; 0

    paddw       xmm4, xmm7                  ; 1
    psubw       xmm0, xmm7                  ; 2

    psubw       xmm6, xmm3                  ; 3
    psraw       xmm2, 3

    psraw       xmm0, 3
    psraw       xmm4, 3

    psraw       xmm6, 3

    ; transpose to save
    movdqa      xmm7, xmm2                  ; 103 102 101 100 003 002 001 000
    punpcklwd   xmm2, xmm0                  ; 007 003 006 002 005 001 004 000
    punpckhwd   xmm7, xmm0                  ; 107 103 106 102 105 101 104 100

    movdqa      xmm5, xmm4                  ; 111 110 109 108 011 010 009 008
    punpcklwd   xmm4, xmm6                  ; 015 011 014 010 013 009 012 008
    punpckhwd   xmm5, xmm6                  ; 115 111 114 110 113 109 112 108


    movdqa      xmm1, xmm2                  ; 007 003 006 002 005 001 004 000
    punpckldq   xmm2, xmm4                  ; 013 009 005 001 012 008 004 000
    punpckhdq   xmm1, xmm4                  ; 015 011 007 003 014 010 006 002

    movdqa      xmm6, xmm7                  ; 107 103 106 102 105 101 104 100
    punpckldq   xmm7, xmm5                  ; 113 109 105 101 112 108 104 100
    punpckhdq   xmm6, xmm5                  ; 115 111 107 103 114 110 106 102


    movdqa      xmm5, xmm2                  ; 013 009 005 001 012 008 004 000
    punpckldq   xmm2, xmm7                  ; 112 108 012 008 104 100 004 000
    punpckhdq   xmm5, xmm7                  ; 113 109 013 009 105 101 005 001

    movdqa      xmm7, xmm1                  ; 015 011 007 003 014 010 006 002
    punpckldq   xmm1, xmm6                  ; 114 110 014 010 106 102 006 002
    punpckhdq   xmm7, xmm6                  ; 115 111 015 011 107 103 007 003

    pshufd      xmm0, xmm2, 11011000b
    pshufd      xmm2, xmm1, 11011000b

    pshufd      xmm1, xmm5, 11011000b
    pshufd      xmm3, xmm7, 11011000b

    pxor        xmm7, xmm7

    ; Load up predict blocks
    movsxd      rdx, dword ptr arg(3)       ; dst_stride
    movq        xmm4, [rdi]
    movq        xmm5, [rdi+rdx]
    lea         rcx, [rdx + rdx*2]

    punpcklbw   xmm4, xmm7
    punpcklbw   xmm5, xmm7

    paddw       xmm0, xmm4
    paddw       xmm1, xmm5

    movq        xmm4, [rdi+rdx*2]
    movq        xmm5, [rdi+rcx]

    punpcklbw   xmm4, xmm7
    punpcklbw   xmm5, xmm7

    paddw       xmm2, xmm4
    paddw       xmm3, xmm5

.finish:

    ; pack up before storing
    packuswb    xmm0, xmm7
    packuswb    xmm1, xmm7
    packuswb    xmm2, xmm7
    packuswb    xmm3, xmm7

    ; Load destination stride before writing out,
    ; doesn't need to persist
    movsxd      rdx, dword ptr arg(3)       ; dst_stride

    ; store blocks back out
    movq        [rdi], xmm0
    movq        [rdi + rdx], xmm1

    lea         rdi, [rdi + 2*rdx]

    movq        [rdi], xmm2
    movq        [rdi + rdx], xmm3


    ; begin epilog
    pop         rdi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret

SECTION_RODATA
align 16
fours:
    times 8 dw 0x0004
align 16
x_s1sqr2:
    times 8 dw 0x8A8C
align 16
x_c1sqr2less1:
    times 8 dw 0x4E7B
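
; Notes on the constants above (explanatory only):
;
;   fours         : the +4 used to round before the final arithmetic >> 3.
;   x_s1sqr2      : 0x8A8C = 35468 ~= sin(pi/8)*sqrt(2) * 2^16.  As a signed
;                   word this reads as 35468 - 65536, so pmulhw by it yields
;                   (x*35468 >> 16) - x; the code adds x back once (paddw) to
;                   recover x * sin(pi/8)*sqrt(2).
;   x_c1sqr2less1 : 0x4E7B = 20091 ~= (cos(pi/8)*sqrt(2) - 1) * 2^16.  pmulhw
;                   gives x * (cos(pi/8)*sqrt(2) - 1); adding x back gives
;                   x * cos(pi/8)*sqrt(2).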