;
;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS. All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;


%include "vpx_ports/x86_abi_support.asm"
extern sym(vp8_bilinear_filters_x86_8)

%define BLOCK_HEIGHT_WIDTH 4
%define VP8_FILTER_WEIGHT 128
%define VP8_FILTER_SHIFT  7


;/************************************************************************************
; Notes: filter_block1d_h6 applies a 6-tap filter horizontally to the input pixels.
; The input pixel array has output_height rows. This routine assumes that
; output_height is an even number. This function handles 8 pixels in the horizontal
; direction, calculating one row per iteration to take advantage of the 128-bit
; operations.
;*************************************************************************************/
;void vp8_filter_block1d8_h6_sse2
;(
;    unsigned char  *src_ptr,
;    unsigned short *output_ptr,
;    unsigned int    src_pixels_per_line,
;    unsigned int    pixel_step,
;    unsigned int    output_height,
;    unsigned int    output_width,
;    short          *vp8_filter
;)
global sym(vp8_filter_block1d8_h6_sse2) PRIVATE
sym(vp8_filter_block1d8_h6_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 7
    SAVE_XMM 7
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    mov         rdx,    arg(6)                  ;vp8_filter
    mov         rsi,    arg(0)                  ;src_ptr

    mov         rdi,    arg(1)                  ;output_ptr

    movsxd      rcx,    dword ptr arg(4)        ;output_height
    movsxd      rax,    dword ptr arg(2)        ;src_pixels_per_line    ; Pitch for Source
%if ABI_IS_32BIT=0
    movsxd      r8,     dword ptr arg(5)        ;output_width
%endif
    pxor        xmm0,   xmm0                    ; clear xmm0 for unpack

.filter_block1d8_h6_rowloop:
    movq        xmm3,   MMWORD PTR [rsi - 2]
    movq        xmm1,   MMWORD PTR [rsi + 6]

    prefetcht2  [rsi+rax-2]

    pslldq      xmm1,   8
    por         xmm1,   xmm3

    movdqa      xmm4,   xmm1
    movdqa      xmm5,   xmm1

    movdqa      xmm6,   xmm1
    movdqa      xmm7,   xmm1

    punpcklbw   xmm3,   xmm0                    ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
    psrldq      xmm4,   1                       ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1

    pmullw      xmm3,   XMMWORD PTR [rdx]       ; x[-2] * H[-2]; Tap 1
    punpcklbw   xmm4,   xmm0                    ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1

    psrldq      xmm5,   2                       ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
    pmullw      xmm4,   XMMWORD PTR [rdx+16]    ; x[-1] * H[-1]; Tap 2

    punpcklbw   xmm5,   xmm0                    ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
    psrldq      xmm6,   3                       ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01

    pmullw      xmm5,   [rdx+32]                ; x[ 0] * H[ 0]; Tap 3

    punpcklbw   xmm6,   xmm0                    ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
    psrldq      xmm7,   4                       ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02

    pmullw      xmm6,   [rdx+48]                ; x[ 1] * H[ 1]; Tap 4

    punpcklbw   xmm7,   xmm0                    ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
    psrldq      xmm1,   5                       ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03

    pmullw      xmm7,   [rdx+64]                ; x[ 2] * H[ 2]; Tap 5

    punpcklbw   xmm1,   xmm0                    ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
    pmullw      xmm1,   [rdx+80]                ; x[ 3] * H[ 3]; Tap 6

    paddsw      xmm4,   xmm7
    paddsw      xmm4,   xmm5

    paddsw      xmm4,   xmm3
    paddsw      xmm4,   xmm6

    paddsw      xmm4,   xmm1
    paddsw      xmm4,   [GLOBAL(rd)]

    psraw       xmm4,   7

    packuswb    xmm4,   xmm0
    punpcklbw   xmm4,   xmm0

    movdqa      XMMWORD PTR [rdi], xmm4
    lea         rsi,    [rsi + rax]

%if ABI_IS_32BIT
    add         rdi,    DWORD PTR arg(5)        ;[output_width]
%else
    add         rdi,    r8
%endif
    dec         rcx

    jnz         .filter_block1d8_h6_rowloop     ; next row

    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret


;void vp8_filter_block1d16_h6_sse2
;(
;    unsigned char  *src_ptr,
;    unsigned short *output_ptr,
;    unsigned int    src_pixels_per_line,
;    unsigned int    pixel_step,
;    unsigned int    output_height,
;    unsigned int    output_width,
;    short          *vp8_filter
;)
;/************************************************************************************
; Notes: filter_block1d_h6 applies a 6-tap filter horizontally to the input pixels.
; The input pixel array has output_height rows. This routine assumes that
; output_height is an even number. This function handles 16 pixels in the horizontal
; direction, calculating one row per iteration to take advantage of the 128-bit
; operations.
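;
; For reference, a scalar C sketch of this first pass is given below. It is
; illustrative only: the loop variables are not part of this file, the filter
; table actually stores each tap replicated across eight words (hence the
; 16-byte stride used when loading taps below), and the saturating 16-bit
; adds of the SIMD code are ignored. The result is rounded with rd (64) and
; VP8_FILTER_SHIFT (7), clamped to [0, 255], and kept as 16-bit values for
; the second pass:
;
;   for (i = 0; i < output_height; i++) {
;       for (j = 0; j < 16; j++) {
;           int sum = 0;
;           for (t = -2; t <= 3; t++)                   /* six taps */
;               sum += src_ptr[j + t] * vp8_filter[t + 2];
;           sum = (sum + VP8_FILTER_WEIGHT / 2) >> VP8_FILTER_SHIFT;
;           output_ptr[j] = (unsigned short)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
;       }
;       src_ptr    += src_pixels_per_line;
;       output_ptr += output_width;
;   }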
;*************************************************************************************/
global sym(vp8_filter_block1d16_h6_sse2) PRIVATE
sym(vp8_filter_block1d16_h6_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 7
    SAVE_XMM 7
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    mov         rdx,    arg(6)                  ;vp8_filter
    mov         rsi,    arg(0)                  ;src_ptr

    mov         rdi,    arg(1)                  ;output_ptr

    movsxd      rcx,    dword ptr arg(4)        ;output_height
    movsxd      rax,    dword ptr arg(2)        ;src_pixels_per_line    ; Pitch for Source
%if ABI_IS_32BIT=0
    movsxd      r8,     dword ptr arg(5)        ;output_width
%endif

    pxor        xmm0,   xmm0                    ; clear xmm0 for unpack

.filter_block1d16_h6_sse2_rowloop:
    movq        xmm3,   MMWORD PTR [rsi - 2]
    movq        xmm1,   MMWORD PTR [rsi + 6]

    movq        xmm2,   MMWORD PTR [rsi +14]
    pslldq      xmm2,   8

    por         xmm2,   xmm1
    prefetcht2  [rsi+rax-2]

    pslldq      xmm1,   8
    por         xmm1,   xmm3

    movdqa      xmm4,   xmm1
    movdqa      xmm5,   xmm1

    movdqa      xmm6,   xmm1
    movdqa      xmm7,   xmm1

    punpcklbw   xmm3,   xmm0                    ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
    psrldq      xmm4,   1                       ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1

    pmullw      xmm3,   XMMWORD PTR [rdx]       ; x[-2] * H[-2]; Tap 1
    punpcklbw   xmm4,   xmm0                    ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1

    psrldq      xmm5,   2                       ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
    pmullw      xmm4,   XMMWORD PTR [rdx+16]    ; x[-1] * H[-1]; Tap 2

    punpcklbw   xmm5,   xmm0                    ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
    psrldq      xmm6,   3                       ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01

    pmullw      xmm5,   [rdx+32]                ; x[ 0] * H[ 0]; Tap 3

    punpcklbw   xmm6,   xmm0                    ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
    psrldq      xmm7,   4                       ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02

    pmullw      xmm6,   [rdx+48]                ; x[ 1] * H[ 1]; Tap 4

    punpcklbw   xmm7,   xmm0                    ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
    psrldq      xmm1,   5                       ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03

    pmullw      xmm7,   [rdx+64]                ; x[ 2] * H[ 2]; Tap 5

    punpcklbw   xmm1,   xmm0                    ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
    pmullw      xmm1,   [rdx+80]                ; x[ 3] * H[ 3]; Tap 6

    paddsw      xmm4,   xmm7
    paddsw      xmm4,   xmm5

    paddsw      xmm4,   xmm3
    paddsw      xmm4,   xmm6

    paddsw      xmm4,   xmm1
    paddsw      xmm4,   [GLOBAL(rd)]

    psraw       xmm4,   7

    packuswb    xmm4,   xmm0
    punpcklbw   xmm4,   xmm0

    movdqa      XMMWORD PTR [rdi], xmm4

    movdqa      xmm3,   xmm2
    movdqa      xmm4,   xmm2

    movdqa      xmm5,   xmm2
    movdqa      xmm6,   xmm2

    movdqa      xmm7,   xmm2

    punpcklbw   xmm3,   xmm0                    ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
    psrldq      xmm4,   1                       ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1

    pmullw      xmm3,   XMMWORD PTR [rdx]       ; x[-2] * H[-2]; Tap 1
    punpcklbw   xmm4,   xmm0                    ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1

    psrldq      xmm5,   2                       ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
    pmullw      xmm4,   XMMWORD PTR [rdx+16]    ; x[-1] * H[-1]; Tap 2

    punpcklbw   xmm5,   xmm0                    ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
    psrldq      xmm6,   3                       ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01

    pmullw      xmm5,   [rdx+32]                ; x[ 0] * H[ 0]; Tap 3

    punpcklbw   xmm6,   xmm0                    ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
    psrldq      xmm7,   4                       ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02

    pmullw      xmm6,   [rdx+48]                ; x[ 1] * H[ 1]; Tap 4

    punpcklbw   xmm7,   xmm0                    ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
    psrldq      xmm2,   5                       ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03

    pmullw      xmm7,   [rdx+64]                ; x[ 2] * H[ 2]; Tap 5

    punpcklbw   xmm2,   xmm0                    ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
    pmullw      xmm2,   [rdx+80]                ; x[ 3] * H[ 3]; Tap 6

    paddsw      xmm4,   xmm7
    paddsw      xmm4,   xmm5

    paddsw      xmm4,   xmm3
    paddsw      xmm4,   xmm6

    paddsw      xmm4,   xmm2
    paddsw      xmm4,   [GLOBAL(rd)]

    psraw       xmm4,   7

    packuswb    xmm4,   xmm0
    punpcklbw   xmm4,   xmm0

    movdqa      XMMWORD PTR [rdi+16], xmm4

    lea         rsi,    [rsi + rax]
%if ABI_IS_32BIT
    add         rdi,    DWORD PTR arg(5)        ;[output_width]
%else
    add         rdi,    r8
%endif

    dec         rcx
    jnz         .filter_block1d16_h6_sse2_rowloop   ; next row

    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret


;void vp8_filter_block1d8_v6_sse2
;(
;    short         *src_ptr,
;    unsigned char *output_ptr,
;    int            dst_pitch,
;    unsigned int   pixels_per_line,
;    unsigned int   pixel_step,
;    unsigned int   output_height,
;    unsigned int   output_width,
;    short         *vp8_filter
;)
;/************************************************************************************
; Notes: filter_block1d8_v6 applies a 6-tap filter vertically to the input pixels.
; The input pixel array has output_height rows.
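;
; For reference, a scalar C sketch of this second (vertical) pass is shown
; below. It is illustrative only: src_ptr points into the 16-bit intermediate
; produced by the first pass, src_stride is a hypothetical name for the row
; stride of that intermediate (not an argument of the routine below), the tap
; value written vp8_filter[t + 2] is replicated eight times in the real table,
; and the saturating 16-bit adds used by the SIMD code are ignored:
;
;   for (i = 0; i < output_height; i++) {
;       for (j = 0; j < 8; j++) {
;           int sum = 0;
;           for (t = -2; t <= 3; t++)                   /* six taps */
;               sum += src_ptr[j + t * src_stride] * vp8_filter[t + 2];
;           sum = (sum + VP8_FILTER_WEIGHT / 2) >> VP8_FILTER_SHIFT;
;           output_ptr[j] = (unsigned char)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
;       }
;       src_ptr    += src_stride;
;       output_ptr += dst_pitch;
;   }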
;*************************************************************************************/
global sym(vp8_filter_block1d8_v6_sse2) PRIVATE
sym(vp8_filter_block1d8_v6_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 8
    SAVE_XMM 7
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    mov         rax,    arg(7)                  ;vp8_filter
    movsxd      rdx,    dword ptr arg(3)        ;pixels_per_line

    mov         rdi,    arg(1)                  ;output_ptr
    mov         rsi,    arg(0)                  ;src_ptr

    sub         rsi,    rdx
    sub         rsi,    rdx

    movsxd      rcx,    DWORD PTR arg(5)        ;[output_height]
    pxor        xmm0,   xmm0                    ; clear xmm0

    movdqa      xmm7,   XMMWORD PTR [GLOBAL(rd)]
%if ABI_IS_32BIT=0
    movsxd      r8,     dword ptr arg(2)        ; dst_pitch
%endif

.vp8_filter_block1d8_v6_sse2_loop:
    movdqa      xmm1,   XMMWORD PTR [rsi]
    pmullw      xmm1,   [rax]

    movdqa      xmm2,   XMMWORD PTR [rsi + rdx]
    pmullw      xmm2,   [rax + 16]

    movdqa      xmm3,   XMMWORD PTR [rsi + rdx * 2]
    pmullw      xmm3,   [rax + 32]

    movdqa      xmm5,   XMMWORD PTR [rsi + rdx * 4]
    pmullw      xmm5,   [rax + 64]

    add         rsi,    rdx
    movdqa      xmm4,   XMMWORD PTR [rsi + rdx * 2]

    pmullw      xmm4,   [rax + 48]
    movdqa      xmm6,   XMMWORD PTR [rsi + rdx * 4]

    pmullw      xmm6,   [rax + 80]

    paddsw      xmm2,   xmm5
    paddsw      xmm2,   xmm3

    paddsw      xmm2,   xmm1
    paddsw      xmm2,   xmm4

    paddsw      xmm2,   xmm6
    paddsw      xmm2,   xmm7

    psraw       xmm2,   7
    packuswb    xmm2,   xmm0                    ; pack and saturate

    movq        QWORD PTR [rdi], xmm2           ; store the results in the destination
%if ABI_IS_32BIT
    add         rdi,    DWORD PTR arg(2)        ;[dst_pitch]
%else
    add         rdi,    r8
%endif
    dec         rcx                             ; decrement count
    jnz         .vp8_filter_block1d8_v6_sse2_loop   ; next row

    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret


;void vp8_filter_block1d16_v6_sse2
;(
;    unsigned short *src_ptr,
;    unsigned char  *output_ptr,
;    int             dst_pitch,
;    unsigned int    pixels_per_line,
;    unsigned int    pixel_step,
;    unsigned int    output_height,
;    unsigned int    output_width,
;    const short    *vp8_filter
;)
;/************************************************************************************
; Notes: filter_block1d16_v6 applies a 6-tap filter vertically to the input pixels.
; The input pixel array has output_height rows.
;*************************************************************************************/
global sym(vp8_filter_block1d16_v6_sse2) PRIVATE
sym(vp8_filter_block1d16_v6_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 8
    SAVE_XMM 7
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    mov         rax,    arg(7)                  ;vp8_filter
    movsxd      rdx,    dword ptr arg(3)        ;pixels_per_line

    mov         rdi,    arg(1)                  ;output_ptr
    mov         rsi,    arg(0)                  ;src_ptr

    sub         rsi,    rdx
    sub         rsi,    rdx

    movsxd      rcx,    DWORD PTR arg(5)        ;[output_height]
%if ABI_IS_32BIT=0
    movsxd      r8,     dword ptr arg(2)        ; dst_pitch
%endif

.vp8_filter_block1d16_v6_sse2_loop:
    ; The order for adding 6-tap is 2 5 3 1 4 6. Read in data in that order.
    movdqa      xmm1,   XMMWORD PTR [rsi + rdx]         ; line 2
    movdqa      xmm2,   XMMWORD PTR [rsi + rdx + 16]
    pmullw      xmm1,   [rax + 16]
    pmullw      xmm2,   [rax + 16]

    movdqa      xmm3,   XMMWORD PTR [rsi + rdx * 4]     ; line 5
    movdqa      xmm4,   XMMWORD PTR [rsi + rdx * 4 + 16]
    pmullw      xmm3,   [rax + 64]
    pmullw      xmm4,   [rax + 64]

    movdqa      xmm5,   XMMWORD PTR [rsi + rdx * 2]     ; line 3
    movdqa      xmm6,   XMMWORD PTR [rsi + rdx * 2 + 16]
    pmullw      xmm5,   [rax + 32]
    pmullw      xmm6,   [rax + 32]

    movdqa      xmm7,   XMMWORD PTR [rsi]               ; line 1
    movdqa      xmm0,   XMMWORD PTR [rsi + 16]
    pmullw      xmm7,   [rax]
    pmullw      xmm0,   [rax]

    paddsw      xmm1,   xmm3
    paddsw      xmm2,   xmm4
    paddsw      xmm1,   xmm5
    paddsw      xmm2,   xmm6
    paddsw      xmm1,   xmm7
    paddsw      xmm2,   xmm0

    add         rsi,    rdx

    movdqa      xmm3,   XMMWORD PTR [rsi + rdx * 2]     ; line 4
    movdqa      xmm4,   XMMWORD PTR [rsi + rdx * 2 + 16]
    pmullw      xmm3,   [rax + 48]
    pmullw      xmm4,   [rax + 48]

    movdqa      xmm5,   XMMWORD PTR [rsi + rdx * 4]     ; line 6
    movdqa      xmm6,   XMMWORD PTR [rsi + rdx * 4 + 16]
    pmullw      xmm5,   [rax + 80]
    pmullw      xmm6,   [rax + 80]

    movdqa      xmm7,   XMMWORD PTR [GLOBAL(rd)]
    pxor        xmm0,   xmm0                    ; clear xmm0

    paddsw      xmm1,   xmm3
    paddsw      xmm2,   xmm4
    paddsw      xmm1,   xmm5
    paddsw      xmm2,   xmm6

    paddsw      xmm1,   xmm7
    paddsw      xmm2,   xmm7

    psraw       xmm1,   7
    psraw       xmm2,   7

    packuswb    xmm1,   xmm2                    ; pack and saturate
    movdqa      XMMWORD PTR [rdi], xmm1         ; store the results in the destination
%if ABI_IS_32BIT
    add         rdi,    DWORD PTR arg(2)        ;[dst_pitch]
%else
    add         rdi,    r8
%endif
    dec         rcx                             ; decrement count
    jnz         .vp8_filter_block1d16_v6_sse2_loop  ; next row

    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret


;void vp8_filter_block1d8_h6_only_sse2
;(
;    unsigned char *src_ptr,
;    unsigned int   src_pixels_per_line,
;    unsigned char *output_ptr,
;    int            dst_pitch,
;    unsigned int   output_height,
;    const short   *vp8_filter
;)
; First-pass filter only when yoffset==0
global sym(vp8_filter_block1d8_h6_only_sse2) PRIVATE
sym(vp8_filter_block1d8_h6_only_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 6
    SAVE_XMM 7
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    mov         rdx,    arg(5)                  ;vp8_filter
    mov         rsi,    arg(0)                  ;src_ptr

    mov         rdi,    arg(2)                  ;output_ptr

    movsxd      rcx,    dword ptr arg(4)        ;output_height
    movsxd      rax,    dword ptr arg(1)        ;src_pixels_per_line    ; Pitch for Source
%if ABI_IS_32BIT=0
    movsxd      r8,     dword ptr arg(3)        ;dst_pitch
%endif
    pxor        xmm0,   xmm0                    ; clear xmm0 for unpack

.filter_block1d8_h6_only_rowloop:
    movq        xmm3,   MMWORD PTR [rsi - 2]
    movq        xmm1,   MMWORD PTR [rsi + 6]

    prefetcht2  [rsi+rax-2]

    pslldq      xmm1,   8
    por         xmm1,   xmm3

    movdqa      xmm4,   xmm1
    movdqa      xmm5,   xmm1

    movdqa      xmm6,   xmm1
    movdqa      xmm7,   xmm1

    punpcklbw   xmm3,   xmm0                    ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
    psrldq      xmm4,   1                       ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1

    pmullw      xmm3,   XMMWORD PTR [rdx]       ; x[-2] * H[-2]; Tap 1
    punpcklbw   xmm4,   xmm0                    ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1

    psrldq      xmm5,   2                       ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
    pmullw      xmm4,   XMMWORD PTR [rdx+16]    ; x[-1] * H[-1]; Tap 2

    punpcklbw   xmm5,   xmm0                    ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
    psrldq      xmm6,   3                       ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01

    pmullw      xmm5,   [rdx+32]                ; x[ 0] * H[ 0]; Tap 3

    punpcklbw   xmm6,   xmm0                    ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
    psrldq      xmm7,   4                       ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02

    pmullw      xmm6,   [rdx+48]                ; x[ 1] * H[ 1]; Tap 4

    punpcklbw   xmm7,   xmm0                    ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
    psrldq      xmm1,   5                       ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03

    pmullw      xmm7,   [rdx+64]                ; x[ 2] * H[ 2]; Tap 5

    punpcklbw   xmm1,   xmm0                    ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
    pmullw      xmm1,   [rdx+80]                ; x[ 3] * H[ 3]; Tap 6

    paddsw      xmm4,   xmm7
    paddsw      xmm4,   xmm5

    paddsw      xmm4,   xmm3
    paddsw      xmm4,   xmm6

    paddsw      xmm4,   xmm1
    paddsw      xmm4,   [GLOBAL(rd)]

    psraw       xmm4,   7

    packuswb    xmm4,   xmm0

    movq        QWORD PTR [rdi], xmm4           ; store the results in the destination
    lea         rsi,    [rsi + rax]

%if ABI_IS_32BIT
    add         rdi,    DWORD PTR arg(3)        ;dst_pitch
%else
    add         rdi,    r8
%endif
    dec         rcx

    jnz         .filter_block1d8_h6_only_rowloop    ; next row

    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret


;void vp8_filter_block1d16_h6_only_sse2
;(
;    unsigned char *src_ptr,
;    unsigned int   src_pixels_per_line,
;    unsigned char *output_ptr,
;    int            dst_pitch,
;    unsigned int   output_height,
;    const short   *vp8_filter
;)
; First-pass filter only when yoffset==0
global sym(vp8_filter_block1d16_h6_only_sse2) PRIVATE
sym(vp8_filter_block1d16_h6_only_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 6
    SAVE_XMM 7
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    mov         rdx,    arg(5)                  ;vp8_filter
    mov         rsi,    arg(0)                  ;src_ptr

    mov         rdi,    arg(2)                  ;output_ptr

    movsxd      rcx,    dword ptr arg(4)        ;output_height
    movsxd      rax,    dword ptr arg(1)        ;src_pixels_per_line    ; Pitch for Source
%if ABI_IS_32BIT=0
    movsxd      r8,     dword ptr arg(3)        ;dst_pitch
%endif

    pxor        xmm0,   xmm0                    ; clear xmm0 for unpack

.filter_block1d16_h6_only_sse2_rowloop:
    movq        xmm3,   MMWORD PTR [rsi - 2]
    movq        xmm1,   MMWORD PTR [rsi + 6]

    movq        xmm2,   MMWORD PTR [rsi +14]
    pslldq      xmm2,   8

    por         xmm2,   xmm1
    prefetcht2  [rsi+rax-2]

    pslldq      xmm1,   8
    por         xmm1,   xmm3

    movdqa      xmm4,   xmm1
    movdqa      xmm5,   xmm1

    movdqa      xmm6,   xmm1
    movdqa      xmm7,   xmm1

    punpcklbw   xmm3,   xmm0                    ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
    psrldq      xmm4,   1                       ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1

    pmullw      xmm3,   XMMWORD PTR [rdx]       ; x[-2] * H[-2]; Tap 1
    punpcklbw   xmm4,   xmm0                    ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1

    psrldq      xmm5,   2                       ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
    pmullw      xmm4,   XMMWORD PTR [rdx+16]    ; x[-1] * H[-1]; Tap 2

    punpcklbw   xmm5,   xmm0                    ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
    psrldq      xmm6,   3                       ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01

    pmullw      xmm5,   [rdx+32]                ; x[ 0] * H[ 0]; Tap 3

    punpcklbw   xmm6,   xmm0                    ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
    psrldq      xmm7,   4                       ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02

    pmullw      xmm6,   [rdx+48]                ; x[ 1] * H[ 1]; Tap 4

    punpcklbw   xmm7,   xmm0                    ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
    psrldq      xmm1,   5                       ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03

    pmullw      xmm7,   [rdx+64]                ; x[ 2] * H[ 2]; Tap 5

    punpcklbw   xmm1,   xmm0                    ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
    pmullw      xmm1,   [rdx+80]                ; x[ 3] * H[ 3]; Tap 6

    paddsw      xmm4,   xmm7
    paddsw      xmm4,   xmm5

    paddsw      xmm4,   xmm3
    paddsw      xmm4,   xmm6

    paddsw      xmm4,   xmm1
    paddsw      xmm4,   [GLOBAL(rd)]

    psraw       xmm4,   7

    packuswb    xmm4,   xmm0                    ; lower 8 bytes

    movq        QWORD PTR [rdi], xmm4           ; store the results in the destination

    movdqa      xmm3,   xmm2
    movdqa      xmm4,   xmm2

    movdqa      xmm5,   xmm2
    movdqa      xmm6,   xmm2

    movdqa      xmm7,   xmm2

    punpcklbw   xmm3,   xmm0                    ; xx05 xx04 xx03 xx02 xx01 xx00 xx-1 xx-2
    psrldq      xmm4,   1                       ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00 -1

    pmullw      xmm3,   XMMWORD PTR [rdx]       ; x[-2] * H[-2]; Tap 1
    punpcklbw   xmm4,   xmm0                    ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1

    psrldq      xmm5,   2                       ; xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
    pmullw      xmm4,   XMMWORD PTR [rdx+16]    ; x[-1] * H[-1]; Tap 2

    punpcklbw   xmm5,   xmm0                    ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00
    psrldq      xmm6,   3                       ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01

    pmullw      xmm5,   [rdx+32]                ; x[ 0] * H[ 0]; Tap 3

    punpcklbw   xmm6,   xmm0                    ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
    psrldq      xmm7,   4                       ; xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02

    pmullw      xmm6,   [rdx+48]                ; x[ 1] * H[ 1]; Tap 4

    punpcklbw   xmm7,   xmm0                    ; xx09 xx08 xx07 xx06 xx05 xx04 xx03 xx02
    psrldq      xmm2,   5                       ; xx xx xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03

    pmullw      xmm7,   [rdx+64]                ; x[ 2] * H[ 2]; Tap 5

    punpcklbw   xmm2,   xmm0                    ; xx0a xx09 xx08 xx07 xx06 xx05 xx04 xx03
    pmullw      xmm2,   [rdx+80]                ; x[ 3] * H[ 3]; Tap 6

    paddsw      xmm4,   xmm7
    paddsw      xmm4,   xmm5

    paddsw      xmm4,   xmm3
    paddsw      xmm4,   xmm6

    paddsw      xmm4,   xmm2
    paddsw      xmm4,   [GLOBAL(rd)]

    psraw       xmm4,   7

    packuswb    xmm4,   xmm0                    ; higher 8 bytes

    movq        QWORD PTR [rdi+8], xmm4         ; store the results in the destination

    lea         rsi,    [rsi + rax]
%if ABI_IS_32BIT
    add         rdi,    DWORD PTR arg(3)        ;dst_pitch
%else
    add         rdi,    r8
%endif

    dec         rcx
    jnz         .filter_block1d16_h6_only_sse2_rowloop  ; next row

    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret


;void vp8_filter_block1d8_v6_only_sse2
;(
;    unsigned char *src_ptr,
;    unsigned int   src_pixels_per_line,
;    unsigned char *output_ptr,
;    int            dst_pitch,
;    unsigned int   output_height,
;    const short   *vp8_filter
;)
; Second-pass filter only when xoffset==0
global sym(vp8_filter_block1d8_v6_only_sse2) PRIVATE
sym(vp8_filter_block1d8_v6_only_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 6
    SAVE_XMM 7
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    mov         rsi,    arg(0)                  ;src_ptr
    mov         rdi,    arg(2)                  ;output_ptr

    movsxd      rcx,    dword ptr arg(4)        ;output_height
    movsxd      rdx,    dword ptr arg(1)        ;src_pixels_per_line

    mov         rax,    arg(5)                  ;vp8_filter

    pxor        xmm0,   xmm0                    ; clear xmm0

    movdqa      xmm7,   XMMWORD PTR [GLOBAL(rd)]
%if ABI_IS_32BIT=0
    movsxd      r8,     dword ptr arg(3)        ; dst_pitch
%endif

.vp8_filter_block1d8_v6_only_sse2_loop:
    movq        xmm1,   MMWORD PTR [rsi]
    movq        xmm2,   MMWORD PTR [rsi + rdx]
    movq        xmm3,   MMWORD PTR [rsi + rdx * 2]
    movq        xmm5,   MMWORD PTR [rsi + rdx * 4]
    add         rsi,    rdx
    movq        xmm4,   MMWORD PTR [rsi + rdx * 2]
    movq        xmm6,   MMWORD PTR [rsi + rdx * 4]

    punpcklbw   xmm1,   xmm0
    pmullw      xmm1,   [rax]

    punpcklbw   xmm2,   xmm0
    pmullw      xmm2,   [rax + 16]

    punpcklbw   xmm3,   xmm0
    pmullw      xmm3,   [rax + 32]

    punpcklbw   xmm5,   xmm0
    pmullw      xmm5,   [rax + 64]

    punpcklbw   xmm4,   xmm0
    pmullw      xmm4,   [rax + 48]

    punpcklbw   xmm6,   xmm0
    pmullw      xmm6,   [rax + 80]

    paddsw      xmm2,   xmm5
    paddsw      xmm2,   xmm3

    paddsw      xmm2,   xmm1
    paddsw      xmm2,   xmm4

    paddsw      xmm2,   xmm6
    paddsw      xmm2,   xmm7

    psraw       xmm2,   7
    packuswb    xmm2,   xmm0                    ; pack and saturate

    movq        QWORD PTR [rdi], xmm2           ; store the results in the destination
%if ABI_IS_32BIT
    add         rdi,    DWORD PTR arg(3)        ;[dst_pitch]
%else
    add         rdi,    r8
%endif
    dec         rcx                             ; decrement count
    jnz         .vp8_filter_block1d8_v6_only_sse2_loop  ; next row

    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret


;void vp8_unpack_block1d16_h6_sse2
;(
;    unsigned char  *src_ptr,
;    unsigned short *output_ptr,
;    unsigned int    src_pixels_per_line,
;    unsigned int    output_height,
;    unsigned int    output_width
;)
global sym(vp8_unpack_block1d16_h6_sse2) PRIVATE
sym(vp8_unpack_block1d16_h6_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 5
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    mov         rsi,    arg(0)                  ;src_ptr
    mov         rdi,    arg(1)                  ;output_ptr

    movsxd      rcx,    dword ptr arg(3)        ;output_height
    movsxd      rax,    dword ptr arg(2)        ;src_pixels_per_line    ; Pitch for Source

    pxor        xmm0,   xmm0                    ; clear xmm0 for unpack
%if ABI_IS_32BIT=0
    movsxd      r8,     dword ptr arg(4)        ;output_width           ; Pitch for Destination
%endif

.unpack_block1d16_h6_sse2_rowloop:
    movq        xmm1,   MMWORD PTR [rsi]        ; 07 06 05 04 03 02 01 00
    movq        xmm3,   MMWORD PTR [rsi+8]      ; 0f 0e 0d 0c 0b 0a 09 08

    punpcklbw   xmm3,   xmm0                    ; xx0f xx0e xx0d xx0c xx0b xx0a xx09 xx08
    punpcklbw   xmm1,   xmm0                    ; xx07 xx06 xx05 xx04 xx03 xx02 xx01 xx00

    movdqa      XMMWORD PTR [rdi],      xmm1
    movdqa      XMMWORD PTR [rdi + 16], xmm3

    lea         rsi,    [rsi + rax]
%if ABI_IS_32BIT
    add         rdi,    DWORD PTR arg(4)        ;[output_width]
%else
    add         rdi,    r8
%endif
    dec         rcx
    jnz         .unpack_block1d16_h6_sse2_rowloop   ; next row

    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    UNSHADOW_ARGS
    pop         rbp
    ret


;void vp8_bilinear_predict16x16_sse2
;(
;    unsigned char *src_ptr,
;    int            src_pixels_per_line,
;    int            xoffset,
;    int            yoffset,
;    unsigned char *dst_ptr,
;    int            dst_pitch
;)
extern sym(vp8_bilinear_filters_x86_8)
global sym(vp8_bilinear_predict16x16_sse2) PRIVATE
sym(vp8_bilinear_predict16x16_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 6
    SAVE_XMM 7
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    ;const short *HFilter = vp8_bilinear_filters_x86_8[xoffset]
    ;const short *VFilter = vp8_bilinear_filters_x86_8[yoffset]

    lea         rcx,    [GLOBAL(sym(vp8_bilinear_filters_x86_8))]
    movsxd      rax,    dword ptr arg(2)        ;xoffset

    cmp         rax,    0                       ;skip first_pass filter if xoffset=0
    je          .b16x16_sp_only

    shl         rax,    5
    add         rax,    rcx                     ;HFilter

    mov         rdi,    arg(4)                  ;dst_ptr
    mov         rsi,    arg(0)                  ;src_ptr
    movsxd      rdx,    dword ptr arg(5)        ;dst_pitch

    movdqa      xmm1,   [rax]
    movdqa      xmm2,   [rax+16]

    movsxd      rax,    dword ptr arg(3)        ;yoffset

    cmp         rax,    0                       ;skip second_pass filter if yoffset=0
    je          .b16x16_fp_only

    shl         rax,    5
    add         rax,    rcx                     ;VFilter

    lea         rcx,    [rdi+rdx*8]
    lea         rcx,    [rcx+rdx*8]
    movsxd      rdx,    dword ptr arg(1)        ;src_pixels_per_line

    pxor        xmm0,   xmm0

%if ABI_IS_32BIT=0
    movsxd      r8,     dword ptr arg(5)        ;dst_pitch
%endif
    ; get the first horizontal line done
    movdqu      xmm3,   [rsi]                   ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
    movdqa      xmm4,   xmm3                    ; make a copy of current line

    punpcklbw   xmm3,   xmm0                    ; xx 00 01 02 03 04 05 06
    punpckhbw   xmm4,   xmm0

    pmullw      xmm3,   xmm1
    pmullw      xmm4,   xmm1

    movdqu      xmm5,   [rsi+1]
    movdqa      xmm6,   xmm5

    punpcklbw   xmm5,   xmm0
    punpckhbw   xmm6,   xmm0

    pmullw      xmm5,   xmm2
    pmullw      xmm6,   xmm2

    paddw       xmm3,   xmm5
    paddw       xmm4,   xmm6

    paddw       xmm3,   [GLOBAL(rd)]            ; xmm3 += round value
    psraw       xmm3,   VP8_FILTER_SHIFT        ; xmm3 /= 128

    paddw       xmm4,   [GLOBAL(rd)]
    psraw       xmm4,   VP8_FILTER_SHIFT

    movdqa      xmm7,   xmm3
    packuswb    xmm7,   xmm4

    add         rsi,    rdx                     ; next line
.next_row:
    movdqu      xmm3,   [rsi]                   ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
    movdqa      xmm4,   xmm3                    ; make a copy of current line

    punpcklbw   xmm3,   xmm0                    ; xx 00 01 02 03 04 05 06
    punpckhbw   xmm4,   xmm0

    pmullw      xmm3,   xmm1
    pmullw      xmm4,   xmm1

    movdqu      xmm5,   [rsi+1]
    movdqa      xmm6,   xmm5

    punpcklbw   xmm5,   xmm0
    punpckhbw   xmm6,   xmm0

    pmullw      xmm5,   xmm2
    pmullw      xmm6,   xmm2

    paddw       xmm3,   xmm5
    paddw       xmm4,   xmm6

    movdqa      xmm5,   xmm7
    movdqa      xmm6,   xmm7

    punpcklbw   xmm5,   xmm0
    punpckhbw   xmm6,   xmm0

    pmullw      xmm5,   [rax]
    pmullw      xmm6,   [rax]

    paddw       xmm3,   [GLOBAL(rd)]            ; xmm3 += round value
    psraw       xmm3,   VP8_FILTER_SHIFT        ; xmm3 /= 128

    paddw       xmm4,   [GLOBAL(rd)]
    psraw       xmm4,   VP8_FILTER_SHIFT

    movdqa      xmm7,   xmm3
    packuswb    xmm7,   xmm4

    pmullw      xmm3,   [rax+16]
    pmullw      xmm4,   [rax+16]

    paddw       xmm3,   xmm5
    paddw       xmm4,   xmm6

    paddw       xmm3,   [GLOBAL(rd)]            ; xmm3 += round value
    psraw       xmm3,   VP8_FILTER_SHIFT        ; xmm3 /= 128

    paddw       xmm4,   [GLOBAL(rd)]
    psraw       xmm4,   VP8_FILTER_SHIFT

    packuswb    xmm3,   xmm4
    movdqa      [rdi],  xmm3                    ; store the results in the destination

    add         rsi,    rdx                     ; next line
%if ABI_IS_32BIT
    add         rdi,    DWORD PTR arg(5)        ;dst_pitch
%else
    add         rdi,    r8
%endif

    cmp         rdi,    rcx
    jne         .next_row

    jmp         .done

.b16x16_sp_only:
    movsxd      rax,    dword ptr arg(3)        ;yoffset
    shl         rax,    5
    add         rax,    rcx                     ;VFilter

    mov         rdi,    arg(4)                  ;dst_ptr
    mov         rsi,    arg(0)                  ;src_ptr
    movsxd      rdx,    dword ptr arg(5)        ;dst_pitch

    movdqa      xmm1,   [rax]
    movdqa      xmm2,   [rax+16]

    lea         rcx,    [rdi+rdx*8]
    lea         rcx,    [rcx+rdx*8]
    movsxd      rax,    dword ptr arg(1)        ;src_pixels_per_line

    pxor        xmm0,   xmm0

    ; get the first horizontal line done
    movdqu      xmm7,   [rsi]                   ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14

    add         rsi,    rax                     ; next line
.next_row_spo:
    movdqu      xmm3,   [rsi]                   ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14

    movdqa      xmm5,   xmm7
    movdqa      xmm6,   xmm7

    movdqa      xmm4,   xmm3                    ; make a copy of current line
    movdqa      xmm7,   xmm3

    punpcklbw   xmm5,   xmm0
    punpckhbw   xmm6,   xmm0
    punpcklbw   xmm3,   xmm0                    ; xx 00 01 02 03 04 05 06
    punpckhbw   xmm4,   xmm0

    pmullw      xmm5,   xmm1
    pmullw      xmm6,   xmm1
    pmullw      xmm3,   xmm2
    pmullw      xmm4,   xmm2

    paddw       xmm3,   xmm5
    paddw       xmm4,   xmm6

    paddw       xmm3,   [GLOBAL(rd)]            ; xmm3 += round value
    psraw       xmm3,   VP8_FILTER_SHIFT        ; xmm3 /= 128

    paddw       xmm4,   [GLOBAL(rd)]
    psraw       xmm4,   VP8_FILTER_SHIFT

    packuswb    xmm3,   xmm4
    movdqa      [rdi],  xmm3                    ; store the results in the destination

    add         rsi,    rax                     ; next line
    add         rdi,    rdx                     ;dst_pitch
    cmp         rdi,    rcx
    jne         .next_row_spo

    jmp         .done

.b16x16_fp_only:
    lea         rcx,    [rdi+rdx*8]
    lea         rcx,    [rcx+rdx*8]
    movsxd      rax,    dword ptr arg(1)        ;src_pixels_per_line
    pxor        xmm0,   xmm0

.next_row_fpo:
    movdqu      xmm3,   [rsi]                   ; xx 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14
    movdqa      xmm4,   xmm3                    ; make a copy of current line

    punpcklbw   xmm3,   xmm0                    ; xx 00 01 02 03 04 05 06
    punpckhbw   xmm4,   xmm0

    pmullw      xmm3,   xmm1
    pmullw      xmm4,   xmm1

    movdqu      xmm5,   [rsi+1]
    movdqa      xmm6,   xmm5

    punpcklbw   xmm5,   xmm0
    punpckhbw   xmm6,   xmm0

    pmullw      xmm5,   xmm2
    pmullw      xmm6,   xmm2

    paddw       xmm3,   xmm5
    paddw       xmm4,   xmm6

    paddw       xmm3,   [GLOBAL(rd)]            ; xmm3 += round value
    psraw       xmm3,   VP8_FILTER_SHIFT        ; xmm3 /= 128

    paddw       xmm4,   [GLOBAL(rd)]
    psraw       xmm4,   VP8_FILTER_SHIFT

    packuswb    xmm3,   xmm4
    movdqa      [rdi],  xmm3                    ; store the results in the destination

    add         rsi,    rax                     ; next line
    add         rdi,    rdx                     ; dst_pitch
    cmp         rdi,    rcx
    jne         .next_row_fpo

.done:
    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret


;void vp8_bilinear_predict8x8_sse2
;(
;    unsigned char *src_ptr,
;    int            src_pixels_per_line,
;    int            xoffset,
;    int            yoffset,
;    unsigned char *dst_ptr,
;    int            dst_pitch
;)
global sym(vp8_bilinear_predict8x8_sse2) PRIVATE
sym(vp8_bilinear_predict8x8_sse2):
    push        rbp
    mov         rbp, rsp
    SHADOW_ARGS_TO_STACK 6
    SAVE_XMM 7
    GET_GOT     rbx
    push        rsi
    push        rdi
    ; end prolog

    ALIGN_STACK 16, rax
    sub         rsp,    144                     ; reserve 144 bytes

    ;const short *HFilter = vp8_bilinear_filters_x86_8[xoffset]
    ;const short *VFilter = vp8_bilinear_filters_x86_8[yoffset]
    lea         rcx,    [GLOBAL(sym(vp8_bilinear_filters_x86_8))]

    mov         rsi,    arg(0)                  ;src_ptr
    movsxd      rdx,    dword ptr arg(1)        ;src_pixels_per_line

    ;Read 9-line unaligned data in and put them on stack. This gives a big
    ;performance boost.
    movdqu      xmm0,   [rsi]
    lea         rax,    [rdx + rdx*2]
    movdqu      xmm1,   [rsi+rdx]
    movdqu      xmm2,   [rsi+rdx*2]
    add         rsi,    rax
    movdqu      xmm3,   [rsi]
    movdqu      xmm4,   [rsi+rdx]
    movdqu      xmm5,   [rsi+rdx*2]
    add         rsi,    rax
    movdqu      xmm6,   [rsi]
    movdqu      xmm7,   [rsi+rdx]

    movdqa      XMMWORD PTR [rsp],      xmm0

    movdqu      xmm0,   [rsi+rdx*2]

    movdqa      XMMWORD PTR [rsp+16],   xmm1
    movdqa      XMMWORD PTR [rsp+32],   xmm2
    movdqa      XMMWORD PTR [rsp+48],   xmm3
    movdqa      XMMWORD PTR [rsp+64],   xmm4
    movdqa      XMMWORD PTR [rsp+80],   xmm5
    movdqa      XMMWORD PTR [rsp+96],   xmm6
    movdqa      XMMWORD PTR [rsp+112],  xmm7
    movdqa      XMMWORD PTR [rsp+128],  xmm0

    movsxd      rax,    dword ptr arg(2)        ;xoffset
    shl         rax,    5
    add         rax,    rcx                     ;HFilter

    mov         rdi,    arg(4)                  ;dst_ptr
    movsxd      rdx,    dword ptr arg(5)        ;dst_pitch

    movdqa      xmm1,   [rax]
    movdqa      xmm2,   [rax+16]

    movsxd      rax,    dword ptr arg(3)        ;yoffset
    shl         rax,    5
    add         rax,    rcx                     ;VFilter

    lea         rcx,    [rdi+rdx*8]

    movdqa      xmm5,   [rax]
    movdqa      xmm6,   [rax+16]

    pxor        xmm0,   xmm0

    ; get the first horizontal line done
    movdqa      xmm3,   XMMWORD PTR [rsp]
    movdqa      xmm4,   xmm3                    ; make a copy of current line
    psrldq      xmm4,   1

    punpcklbw   xmm3,   xmm0                    ; 00 01 02 03 04 05 06 07
    punpcklbw   xmm4,   xmm0                    ; 01 02 03 04 05 06 07 08

    pmullw      xmm3,   xmm1
    pmullw      xmm4,   xmm2

    paddw       xmm3,   xmm4

    paddw       xmm3,   [GLOBAL(rd)]            ; xmm3 += round value
    psraw       xmm3,   VP8_FILTER_SHIFT        ; xmm3 /= 128

    movdqa      xmm7,   xmm3
    add         rsp,    16                      ; next line
.next_row8x8:
    movdqa      xmm3,   XMMWORD PTR [rsp]       ; 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15
    movdqa      xmm4,   xmm3                    ; make a copy of current line
    psrldq      xmm4,   1

    punpcklbw   xmm3,   xmm0                    ; 00 01 02 03 04 05 06 07
    punpcklbw   xmm4,   xmm0                    ; 01 02 03 04 05 06 07 08

    pmullw      xmm3,   xmm1
    pmullw      xmm4,   xmm2

    paddw       xmm3,   xmm4
    pmullw      xmm7,   xmm5

    paddw       xmm3,   [GLOBAL(rd)]            ; xmm3 += round value
    psraw       xmm3,   VP8_FILTER_SHIFT        ; xmm3 /= 128

    movdqa      xmm4,   xmm3

    pmullw      xmm3,   xmm6
    paddw       xmm3,   xmm7

    movdqa      xmm7,   xmm4

    paddw       xmm3,   [GLOBAL(rd)]            ; xmm3 += round value
    psraw       xmm3,   VP8_FILTER_SHIFT        ; xmm3 /= 128

    packuswb    xmm3,   xmm0
    movq        [rdi],  xmm3                    ; store the results in the destination

    add         rsp,    16                      ; next line
    add         rdi,    rdx

    cmp         rdi,    rcx
    jne         .next_row8x8

    ;add rsp, 144
    pop         rsp
    ; begin epilog
    pop         rdi
    pop         rsi
    RESTORE_GOT
    RESTORE_XMM
    UNSHADOW_ARGS
    pop         rbp
    ret


SECTION_RODATA
align 16
rd:
    times 8 dw 0x40
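
; For reference, a scalar sketch of the bilinear prediction implemented by
; vp8_bilinear_predict16x16_sse2 and vp8_bilinear_predict8x8_sse2 above
; (illustrative only). HFilter and VFilter are the two taps selected from
; vp8_bilinear_filters_x86_8 by xoffset and yoffset (each tap is replicated
; eight times in that table), and rd above is the rounding constant 64 used
; with VP8_FILTER_SHIFT (7). Results are clamped to [0, 255]; when xoffset
; or yoffset is 0, the corresponding pass is skipped:
;
;   /* first pass: horizontal, one extra row for the second pass */
;   temp[i][j] = (src[i][j] * HFilter[0] + src[i][j + 1] * HFilter[1] + 64) >> 7;
;   /* second pass: vertical, producing the final prediction */
;   dst[i][j]  = (temp[i][j] * VFilter[0] + temp[i + 1][j] * VFilter[1] + 64) >> 7;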