;
;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS. All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;

%include "third_party/x86inc/x86inc.asm"

SECTION .text
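
; SAD_FN: common prologue shared by the SAD kernels below.
;   %1 = block width, %2 = block height
;   %3 = number of general-purpose registers to request from cglobal
;        (7 additionally precomputes src_stride3q/ref_stride3q = 3*stride)
;   %4 = 0 for a plain SAD kernel, 1 for the _avg variant, which takes a
;        fifth argument, second_pred, that is averaged with ref (pavgb)
;        before the SAD is computed.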
%macro SAD_FN 4
%if %4 == 0
%if %3 == 5
cglobal sad%1x%2, 4, %3, 5, src, src_stride, ref, ref_stride, n_rows
%else ; %3 == 7
cglobal sad%1x%2, 4, %3, 5, src, src_stride, ref, ref_stride, \
                            src_stride3, ref_stride3, n_rows
%endif ; %3 == 5/7
%else ; avg
%if %3 == 5
cglobal sad%1x%2_avg, 5, 1 + %3, 5, src, src_stride, ref, ref_stride, \
                                    second_pred, n_rows
%else ; %3 == 7
cglobal sad%1x%2_avg, 5, ARCH_X86_64 + %3, 5, src, src_stride, \
                                              ref, ref_stride, \
                                              second_pred, \
                                              src_stride3, ref_stride3
%if ARCH_X86_64
%define n_rowsd r7d
%else ; x86-32
%define n_rowsd dword r0m
%endif ; x86-32/64
%endif ; %3 == 5/7
%endif ; avg/sad
  movsxdifnidn  src_strideq, src_strided
  movsxdifnidn  ref_strideq, ref_strided
%if %3 == 7
  lea           src_stride3q, [src_strideq*3]
  lea           ref_stride3q, [ref_strideq*3]
%endif ; %3 == 7
%endmacro

; unsigned int vp9_sad64x64_sse2(uint8_t *src, int src_stride,
;                                uint8_t *ref, int ref_stride);
%macro SAD64XN 1-2 0
  SAD_FN 64, %1, 5, %2
  mov           n_rowsd, %1
  pxor          m0, m0
.loop:
  movu          m1, [refq]
  movu          m2, [refq+16]
  movu          m3, [refq+32]
  movu          m4, [refq+48]
%if %2 == 1
  pavgb         m1, [second_predq+mmsize*0]
  pavgb         m2, [second_predq+mmsize*1]
  pavgb         m3, [second_predq+mmsize*2]
  pavgb         m4, [second_predq+mmsize*3]
  lea           second_predq, [second_predq+mmsize*4]
%endif
  psadbw        m1, [srcq]
  psadbw        m2, [srcq+16]
  psadbw        m3, [srcq+32]
  psadbw        m4, [srcq+48]
  paddd         m1, m2
  paddd         m3, m4
  add           refq, ref_strideq
  paddd         m0, m1
  add           srcq, src_strideq
  paddd         m0, m3
  dec           n_rowsd
  jg .loop

  movhlps       m1, m0
  paddd         m0, m1
  movd          eax, m0
  RET
%endmacro

INIT_XMM sse2
SAD64XN 64 ; sad64x64_sse2
SAD64XN 32 ; sad64x32_sse2
SAD64XN 64, 1 ; sad64x64_avg_sse2
SAD64XN 32, 1 ; sad64x32_avg_sse2
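
; The narrower block sizes below follow the same structure as SAD64XN, but
; process several rows per loop iteration instead of one: the 32-wide kernel
; handles 2 rows (n_rows = height/2), while the 16-, 8- and 4-wide kernels
; handle 4 rows (n_rows = height/4) and use the src_stride3q/ref_stride3q
; pointers set up by SAD_FN to address the fourth row of each group. The 8-
; and 4-wide kernels pack multiple rows into a single register (movh/movhps,
; and movd/punpckldq under INIT_MMX) before applying psadbw.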
; unsigned int vp9_sad32x32_sse2(uint8_t *src, int src_stride,
;                                uint8_t *ref, int ref_stride);
%macro SAD32XN 1-2 0
  SAD_FN 32, %1, 5, %2
  mov           n_rowsd, %1/2
  pxor          m0, m0
.loop:
  movu          m1, [refq]
  movu          m2, [refq+16]
  movu          m3, [refq+ref_strideq]
  movu          m4, [refq+ref_strideq+16]
%if %2 == 1
  pavgb         m1, [second_predq+mmsize*0]
  pavgb         m2, [second_predq+mmsize*1]
  pavgb         m3, [second_predq+mmsize*2]
  pavgb         m4, [second_predq+mmsize*3]
  lea           second_predq, [second_predq+mmsize*4]
%endif
  psadbw        m1, [srcq]
  psadbw        m2, [srcq+16]
  psadbw        m3, [srcq+src_strideq]
  psadbw        m4, [srcq+src_strideq+16]
  paddd         m1, m2
  paddd         m3, m4
  lea           refq, [refq+ref_strideq*2]
  paddd         m0, m1
  lea           srcq, [srcq+src_strideq*2]
  paddd         m0, m3
  dec           n_rowsd
  jg .loop

  movhlps       m1, m0
  paddd         m0, m1
  movd          eax, m0
  RET
%endmacro

INIT_XMM sse2
SAD32XN 64 ; sad32x64_sse2
SAD32XN 32 ; sad32x32_sse2
SAD32XN 16 ; sad32x16_sse2
SAD32XN 64, 1 ; sad32x64_avg_sse2
SAD32XN 32, 1 ; sad32x32_avg_sse2
SAD32XN 16, 1 ; sad32x16_avg_sse2

; unsigned int vp9_sad16x{8,16}_sse2(uint8_t *src, int src_stride,
;                                    uint8_t *ref, int ref_stride);
%macro SAD16XN 1-2 0
  SAD_FN 16, %1, 7, %2
  mov           n_rowsd, %1/4
  pxor          m0, m0

.loop:
  movu          m1, [refq]
  movu          m2, [refq+ref_strideq]
  movu          m3, [refq+ref_strideq*2]
  movu          m4, [refq+ref_stride3q]
%if %2 == 1
  pavgb         m1, [second_predq+mmsize*0]
  pavgb         m2, [second_predq+mmsize*1]
  pavgb         m3, [second_predq+mmsize*2]
  pavgb         m4, [second_predq+mmsize*3]
  lea           second_predq, [second_predq+mmsize*4]
%endif
  psadbw        m1, [srcq]
  psadbw        m2, [srcq+src_strideq]
  psadbw        m3, [srcq+src_strideq*2]
  psadbw        m4, [srcq+src_stride3q]
  paddd         m1, m2
  paddd         m3, m4
  lea           refq, [refq+ref_strideq*4]
  paddd         m0, m1
  lea           srcq, [srcq+src_strideq*4]
  paddd         m0, m3
  dec           n_rowsd
  jg .loop

  movhlps       m1, m0
  paddd         m0, m1
  movd          eax, m0
  RET
%endmacro

INIT_XMM sse2
SAD16XN 32 ; sad16x32_sse2
SAD16XN 16 ; sad16x16_sse2
SAD16XN 8 ; sad16x8_sse2
SAD16XN 32, 1 ; sad16x32_avg_sse2
SAD16XN 16, 1 ; sad16x16_avg_sse2
SAD16XN 8, 1 ; sad16x8_avg_sse2

; unsigned int vp9_sad8x{8,16}_sse2(uint8_t *src, int src_stride,
;                                   uint8_t *ref, int ref_stride);
%macro SAD8XN 1-2 0
  SAD_FN 8, %1, 7, %2
  mov           n_rowsd, %1/4
  pxor          m0, m0

.loop:
  movh          m1, [refq]
  movhps        m1, [refq+ref_strideq]
  movh          m2, [refq+ref_strideq*2]
  movhps        m2, [refq+ref_stride3q]
%if %2 == 1
  pavgb         m1, [second_predq+mmsize*0]
  pavgb         m2, [second_predq+mmsize*1]
  lea           second_predq, [second_predq+mmsize*2]
%endif
  movh          m3, [srcq]
  movhps        m3, [srcq+src_strideq]
  movh          m4, [srcq+src_strideq*2]
  movhps        m4, [srcq+src_stride3q]
  psadbw        m1, m3
  psadbw        m2, m4
  lea           refq, [refq+ref_strideq*4]
  paddd         m0, m1
  lea           srcq, [srcq+src_strideq*4]
  paddd         m0, m2
  dec           n_rowsd
  jg .loop

  movhlps       m1, m0
  paddd         m0, m1
  movd          eax, m0
  RET
%endmacro

INIT_XMM sse2
SAD8XN 16 ; sad8x16_sse2
SAD8XN 8 ; sad8x8_sse2
SAD8XN 4 ; sad8x4_sse2
SAD8XN 16, 1 ; sad8x16_avg_sse2
SAD8XN 8, 1 ; sad8x8_avg_sse2
SAD8XN 4, 1 ; sad8x4_avg_sse2

; unsigned int vp9_sad4x{4, 8}_sse(uint8_t *src, int src_stride,
;                                  uint8_t *ref, int ref_stride);
%macro SAD4XN 1-2 0
  SAD_FN 4, %1, 7, %2
  mov           n_rowsd, %1/4
  pxor          m0, m0

.loop:
  movd          m1, [refq]
  movd          m2, [refq+ref_strideq]
  movd          m3, [refq+ref_strideq*2]
  movd          m4, [refq+ref_stride3q]
  punpckldq     m1, m2
  punpckldq     m3, m4
%if %2 == 1
  pavgb         m1, [second_predq+mmsize*0]
  pavgb         m3, [second_predq+mmsize*1]
  lea           second_predq, [second_predq+mmsize*2]
%endif
  movd          m2, [srcq]
  movd          m5, [srcq+src_strideq]
  movd          m4, [srcq+src_strideq*2]
  movd          m6, [srcq+src_stride3q]
  punpckldq     m2, m5
  punpckldq     m4, m6
  psadbw        m1, m2
  psadbw        m3, m4
  lea           refq, [refq+ref_strideq*4]
  paddd         m0, m1
  lea           srcq, [srcq+src_strideq*4]
  paddd         m0, m3
  dec           n_rowsd
  jg .loop

  movd          eax, m0
  RET
%endmacro

INIT_MMX sse
SAD4XN 8 ; sad4x8_sse
SAD4XN 4 ; sad4x4_sse
SAD4XN 8, 1 ; sad4x8_avg_sse
SAD4XN 4, 1 ; sad4x4_avg_sse
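
; Note: each instantiation above exposes the C prototype shown in its section
; comment and returns the SAD as an unsigned int in eax. Judging from the
; second_predq increments, the _avg variants expect second_pred to point to a
; contiguous width x height block (stride equal to the block width) that is
; averaged with ref via pavgb (rounding average) before the SAD is taken.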