;
;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;

%include "third_party/x86inc/x86inc.asm"

SECTION .text

; PROCESS_4x2x4 first, off_{first,second}_{src,ref}, advance_at_end
; Computes the SADs of two 4-pixel rows of src against each of the four
; refs; the running totals are packed pairwise as dwords into m6 (ref1,
; ref2) and m7 (ref3, ref4).  'first' initializes the totals instead of
; accumulating into them.
%macro PROCESS_4x2x4 5-6 0
  movd                  m0, [srcq +%2]
%if %1 == 1
  movd                  m6, [ref1q+%3]
  movd                  m4, [ref2q+%3]
  movd                  m7, [ref3q+%3]
  movd                  m5, [ref4q+%3]
  punpckldq             m0, [srcq +%4]
  punpckldq             m6, [ref1q+%5]
  punpckldq             m4, [ref2q+%5]
  punpckldq             m7, [ref3q+%5]
  punpckldq             m5, [ref4q+%5]
  psadbw                m6, m0
  psadbw                m4, m0
  psadbw                m7, m0
  psadbw                m5, m0
  punpckldq             m6, m4
  punpckldq             m7, m5
%else
  movd                  m1, [ref1q+%3]
  movd                  m2, [ref2q+%3]
  movd                  m3, [ref3q+%3]
  movd                  m4, [ref4q+%3]
  punpckldq             m0, [srcq +%4]
  punpckldq             m1, [ref1q+%5]
  punpckldq             m2, [ref2q+%5]
  punpckldq             m3, [ref3q+%5]
  punpckldq             m4, [ref4q+%5]
  psadbw                m1, m0
  psadbw                m2, m0
  psadbw                m3, m0
  psadbw                m4, m0
  punpckldq             m1, m2
  punpckldq             m3, m4
  paddd                 m6, m1
  paddd                 m7, m3
%endif
%if %6 == 1
  lea                 srcq, [srcq +src_strideq*2]
  lea                ref1q, [ref1q+ref_strideq*2]
  lea                ref2q, [ref2q+ref_strideq*2]
  lea                ref3q, [ref3q+ref_strideq*2]
  lea                ref4q, [ref4q+ref_strideq*2]
%endif
%endmacro

; PROCESS_8x2x4 first, off_{first,second}_{src,ref}, advance_at_end
; Computes the SADs of two 8-pixel rows of src against each of the four
; refs; the per-ref running totals are kept in m4..m7 (ref1..ref4).
%macro PROCESS_8x2x4 5-6 0
  movh                  m0, [srcq +%2]
%if %1 == 1
  movh                  m4, [ref1q+%3]
  movh                  m5, [ref2q+%3]
  movh                  m6, [ref3q+%3]
  movh                  m7, [ref4q+%3]
  movhps                m0, [srcq +%4]
  movhps                m4, [ref1q+%5]
  movhps                m5, [ref2q+%5]
  movhps                m6, [ref3q+%5]
  movhps                m7, [ref4q+%5]
  psadbw                m4, m0
  psadbw                m5, m0
  psadbw                m6, m0
  psadbw                m7, m0
%else
  movh                  m1, [ref1q+%3]
  movh                  m2, [ref2q+%3]
  movh                  m3, [ref3q+%3]
  movhps                m0, [srcq +%4]
  movhps                m1, [ref1q+%5]
  movhps                m2, [ref2q+%5]
  movhps                m3, [ref3q+%5]
  psadbw                m1, m0
  psadbw                m2, m0
  psadbw                m3, m0
  paddd                 m4, m1
  movh                  m1, [ref4q+%3]
  movhps                m1, [ref4q+%5]
  paddd                 m5, m2
  paddd                 m6, m3
  psadbw                m1, m0
  paddd                 m7, m1
%endif
%if %6 == 1
  lea                 srcq, [srcq +src_strideq*2]
  lea                ref1q, [ref1q+ref_strideq*2]
  lea                ref2q, [ref2q+ref_strideq*2]
  lea                ref3q, [ref3q+ref_strideq*2]
  lea                ref4q, [ref4q+ref_strideq*2]
%endif
%endmacro
; PROCESS_16x2x4 first, off_{first,second}_{src,ref}, advance_at_end
; Computes the SADs of two 16-pixel rows of src (aligned loads) against
; each of the four refs (unaligned loads); per-ref running totals are
; kept in m4..m7 (ref1..ref4).
%macro PROCESS_16x2x4 5-6 0
; 1st 16 px
  mova                  m0, [srcq +%2]
%if %1 == 1
  movu                  m4, [ref1q+%3]
  movu                  m5, [ref2q+%3]
  movu                  m6, [ref3q+%3]
  movu                  m7, [ref4q+%3]
  psadbw                m4, m0
  psadbw                m5, m0
  psadbw                m6, m0
  psadbw                m7, m0
%else
  movu                  m1, [ref1q+%3]
  movu                  m2, [ref2q+%3]
  movu                  m3, [ref3q+%3]
  psadbw                m1, m0
  psadbw                m2, m0
  psadbw                m3, m0
  paddd                 m4, m1
  movu                  m1, [ref4q+%3]
  paddd                 m5, m2
  paddd                 m6, m3
  psadbw                m1, m0
  paddd                 m7, m1
%endif

; 2nd 16 px
  mova                  m0, [srcq +%4]
  movu                  m1, [ref1q+%5]
  movu                  m2, [ref2q+%5]
  movu                  m3, [ref3q+%5]
  psadbw                m1, m0
  psadbw                m2, m0
  psadbw                m3, m0
  paddd                 m4, m1
  movu                  m1, [ref4q+%5]
  paddd                 m5, m2
  paddd                 m6, m3
%if %6 == 1
  lea                 srcq, [srcq +src_strideq*2]
  lea                ref1q, [ref1q+ref_strideq*2]
  lea                ref2q, [ref2q+ref_strideq*2]
  lea                ref3q, [ref3q+ref_strideq*2]
  lea                ref4q, [ref4q+ref_strideq*2]
%endif
  psadbw                m1, m0
  paddd                 m7, m1
%endmacro

; PROCESS_32x2x4 first, off_{first,second}_{src,ref}, advance_at_end
%macro PROCESS_32x2x4 5-6 0
  PROCESS_16x2x4 %1, %2, %3, %2 + 16, %3 + 16
  PROCESS_16x2x4  0, %4, %5, %4 + 16, %5 + 16, %6
%endmacro

; PROCESS_64x2x4 first, off_{first,second}_{src,ref}, advance_at_end
%macro PROCESS_64x2x4 5-6 0
  PROCESS_32x2x4 %1, %2, %3, %2 + 32, %3 + 32
  PROCESS_32x2x4  0, %4, %5, %4 + 32, %5 + 32, %6
%endmacro

; void vp9_sadNxNx4d_sse2(uint8_t *src,    int src_stride,
;                         uint8_t *ref[4], int ref_stride,
;                         unsigned int res[4]);
; where NxN = 64x64, 64x32, 32x64, 32x32, 32x16, 16x32, 16x16, 16x8,
;             8x16, 8x8, 8x4, 4x8 or 4x4
; (see the usage sketch in the comment block at the end of this file)
%macro SADNXN4D 2
%if UNIX64
cglobal sad%1x%2x4d, 5, 8, 8, src, src_stride, ref1, ref_stride, \
                              res, ref2, ref3, ref4
%else
cglobal sad%1x%2x4d, 4, 7, 8, src, src_stride, ref1, ref_stride, \
                              ref2, ref3, ref4
%endif
  movsxdifnidn src_strideq, src_strided
  movsxdifnidn ref_strideq, ref_strided
; ref1q arrives pointing at the ref[4] array; fetch all four block pointers
; (ref1q last, since it is overwritten).
  mov                ref2q, [ref1q+gprsize*1]
  mov                ref3q, [ref1q+gprsize*2]
  mov                ref4q, [ref1q+gprsize*3]
  mov                ref1q, [ref1q+gprsize*0]

  PROCESS_%1x2x4 1, 0, 0, src_strideq, ref_strideq, 1
%rep (%2-4)/2
  PROCESS_%1x2x4 0, 0, 0, src_strideq, ref_strideq, 1
%endrep
  PROCESS_%1x2x4 0, 0, 0, src_strideq, ref_strideq, 0

%if mmsize == 16
; Fold the two psadbw halves of each per-ref total and store the four
; results to res.
  pslldq                m5, 4
  pslldq                m7, 4
  por                   m4, m5
  por                   m6, m7
  mova                  m5, m4
  mova                  m7, m6
  punpcklqdq            m4, m6
  punpckhqdq            m5, m7
  movifnidn             r4, r4mp
  paddd                 m4, m5
  movu                [r4], m4
  RET
%else
  movifnidn             r4, r4mp
  movq               [r4+0], m6
  movq               [r4+8], m7
  RET
%endif
%endmacro

INIT_XMM sse2
SADNXN4D 64, 64
SADNXN4D 64, 32
SADNXN4D 32, 64
SADNXN4D 32, 32
SADNXN4D 32, 16
SADNXN4D 16, 32
SADNXN4D 16, 16
SADNXN4D 16,  8
SADNXN4D  8, 16
SADNXN4D  8,  8
SADNXN4D  8,  4

INIT_MMX sse
SADNXN4D  4,  8
SADNXN4D  4,  4
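
; --------------------------------------------------------------------------
; A minimal C-side usage sketch (comments only; nothing here is assembled).
; It assumes the 16x16 variant is reachable as vp9_sad16x16x4d_sse2, per the
; prototype comment above; in an actual libvpx build the symbol prefix and
; dispatch normally go through the run-time CPU detection layer, so treat
; the direct call as illustrative rather than the library's official entry
; point.  The helper name sad_four_candidates and the choice of candidate
; offsets are hypothetical.
;
;   #include <stdint.h>
;
;   void vp9_sad16x16x4d_sse2(uint8_t *src, int src_stride,
;                             uint8_t *ref[4], int ref_stride,
;                             unsigned int res[4]);
;
;   /* SAD of one 16x16 source block against four candidate reference
;    * blocks, computed in a single call. */
;   static void sad_four_candidates(uint8_t *src, int src_stride,
;                                   uint8_t *ref_base, int ref_stride,
;                                   unsigned int sad[4]) {
;     /* Hypothetical candidates: four neighboring positions. */
;     uint8_t *refs[4] = { ref_base,              ref_base + 1,
;                          ref_base + ref_stride, ref_base + ref_stride + 1 };
;     vp9_sad16x16x4d_sse2(src, src_stride, refs, ref_stride, sad);
;     /* sad[i] now holds the SAD of src against refs[i]. */
;   }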