/*
 *  Copyright 2012 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/basic_types.h"
#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && defined(_MSC_VER)

__declspec(naked) __declspec(align(16))
uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count) {
  __asm {
    mov        eax, [esp + 4]    // src_a
    mov        edx, [esp + 8]    // src_b
    mov        ecx, [esp + 12]   // count
    pxor       xmm0, xmm0
    pxor       xmm5, xmm5

    align      4
  wloop:
    movdqa     xmm1, [eax]
    lea        eax,  [eax + 16]
    movdqa     xmm2, [edx]
    lea        edx,  [edx + 16]
    sub        ecx, 16
    movdqa     xmm3, xmm1  // abs trick
    psubusb    xmm1, xmm2
    psubusb    xmm2, xmm3
    por        xmm1, xmm2
    movdqa     xmm2, xmm1
    punpcklbw  xmm1, xmm5
    punpckhbw  xmm2, xmm5
    pmaddwd    xmm1, xmm1
    pmaddwd    xmm2, xmm2
    paddd      xmm0, xmm1
    paddd      xmm0, xmm2
    jg         wloop

    pshufd     xmm1, xmm0, 0xee  // dwords 3, 2 onto 1, 0.
    paddd      xmm0, xmm1
    pshufd     xmm1, xmm0, 0x01  // dword 1 onto 0.
    paddd      xmm0, xmm1
    movd       eax, xmm0         // return the sum.
    ret
  }
}
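
// For reference, a minimal scalar sketch (illustrative, not from the original
// file) of what SumSquareError_SSE2 above and SumSquareError_AVX2 below
// compute: the sum of squared byte differences over count bytes. In the SIMD
// versions, the psubusb/psubusb/por sequence is the classic unsigned
// abs-difference trick, and pmaddwd both squares the widened u16 values and
// pairwise-adds them into u32 accumulators.
static uint32 SumSquareError_C_Sketch(const uint8* src_a, const uint8* src_b,
                                      int count) {
  uint32 sse = 0u;
  for (int i = 0; i < count; ++i) {
    int diff = src_a[i] - src_b[i];  // abs is unnecessary: diff is squared.
    sse += (uint32)(diff * diff);
  }
  return sse;
}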

// Visual C 2012 required for AVX2.
#if _MSC_VER >= 1700
// C4752: found Intel(R) Advanced Vector Extensions; consider using /arch:AVX.
#pragma warning(disable: 4752)
__declspec(naked) __declspec(align(16))
uint32 SumSquareError_AVX2(const uint8* src_a, const uint8* src_b, int count) {
  __asm {
    mov        eax, [esp + 4]    // src_a
    mov        edx, [esp + 8]    // src_b
    mov        ecx, [esp + 12]   // count
    vpxor      ymm0, ymm0, ymm0  // sum
    vpxor      ymm5, ymm5, ymm5  // constant 0 for unpck
    sub        edx, eax

    align      4
  wloop:
    vmovdqu    ymm1, [eax]
    vmovdqu    ymm2, [eax + edx]
    lea        eax,  [eax + 32]
    sub        ecx, 32
    vpsubusb   ymm3, ymm1, ymm2  // abs difference trick
    vpsubusb   ymm2, ymm2, ymm1
    vpor       ymm1, ymm2, ymm3
    vpunpcklbw ymm2, ymm1, ymm5  // u16.  mutates order.
    vpunpckhbw ymm1, ymm1, ymm5
    vpmaddwd   ymm2, ymm2, ymm2  // square + hadd to u32.
    vpmaddwd   ymm1, ymm1, ymm1
    vpaddd     ymm0, ymm0, ymm1
    vpaddd     ymm0, ymm0, ymm2
    jg         wloop

    vpshufd    ymm1, ymm0, 0xee  // 3, 2 + 1, 0 both lanes.
    vpaddd     ymm0, ymm0, ymm1
    vpshufd    ymm1, ymm0, 0x01  // 1 + 0 both lanes.
    vpaddd     ymm0, ymm0, ymm1
    vpermq     ymm1, ymm0, 0x02  // high + low lane.
    vpaddd     ymm0, ymm0, ymm1
    vmovd      eax, xmm0
    vzeroupper
    ret
  }
}
#endif  // _MSC_VER >= 1700

#define HAS_HASHDJB2_SSE41
static uvec32 kHash16x33 = { 0x92d9e201, 0, 0, 0 };  // 33 ^ 16
static uvec32 kHashMul0 = {
  0x0c3525e1,  // 33 ^ 15
  0xa3476dc1,  // 33 ^ 14
  0x3b4039a1,  // 33 ^ 13
  0x4f5f0981,  // 33 ^ 12
};
static uvec32 kHashMul1 = {
  0x30f35d61,  // 33 ^ 11
  0x855cb541,  // 33 ^ 10
  0x040a9121,  // 33 ^ 9
  0x747c7101,  // 33 ^ 8
};
static uvec32 kHashMul2 = {
  0xec41d4e1,  // 33 ^ 7
  0x4cfa3cc1,  // 33 ^ 6
  0x025528a1,  // 33 ^ 5
  0x00121881,  // 33 ^ 4
};
static uvec32 kHashMul3 = {
  0x00008c61,  // 33 ^ 3
  0x00000441,  // 33 ^ 2
  0x00000021,  // 33 ^ 1
  0x00000001,  // 33 ^ 0
};

// Byte encodings of pmulld, which this inline assembler does not accept as a
// mnemonic; the macro below hand-emits the opcode for a given modrm byte.
// 27: 66 0F 38 40 C6     pmulld      xmm0,xmm6
// 44: 66 0F 38 40 DD     pmulld      xmm3,xmm5
// 59: 66 0F 38 40 E5     pmulld      xmm4,xmm5
// 72: 66 0F 38 40 D5     pmulld      xmm2,xmm5
// 83: 66 0F 38 40 CD     pmulld      xmm1,xmm5
#define pmulld(reg) _asm _emit 0x66 _asm _emit 0x0F _asm _emit 0x38 \
    _asm _emit 0x40 _asm _emit reg

__declspec(naked) __declspec(align(16))
uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed) {
  __asm {
    mov        eax, [esp + 4]    // src
    mov        ecx, [esp + 8]    // count
    movd       xmm0, [esp + 12]  // seed

    pxor       xmm7, xmm7        // constant 0 for unpck
    movdqa     xmm6, kHash16x33

    align      4
  wloop:
    movdqu     xmm1, [eax]       // src[0-15]
    lea        eax, [eax + 16]
    pmulld(0xc6)                 // pmulld     xmm0,xmm6  hash *= 33 ^ 16
    movdqa     xmm5, kHashMul0
    movdqa     xmm2, xmm1
    punpcklbw  xmm2, xmm7        // src[0-7]
    movdqa     xmm3, xmm2
    punpcklwd  xmm3, xmm7        // src[0-3]
    pmulld(0xdd)                 // pmulld     xmm3, xmm5
    movdqa     xmm5, kHashMul1
    movdqa     xmm4, xmm2
    punpckhwd  xmm4, xmm7        // src[4-7]
    pmulld(0xe5)                 // pmulld     xmm4, xmm5
    movdqa     xmm5, kHashMul2
    punpckhbw  xmm1, xmm7        // src[8-15]
    movdqa     xmm2, xmm1
    punpcklwd  xmm2, xmm7        // src[8-11]
    pmulld(0xd5)                 // pmulld     xmm2, xmm5
    movdqa     xmm5, kHashMul3
    punpckhwd  xmm1, xmm7        // src[12-15]
    pmulld(0xcd)                 // pmulld     xmm1, xmm5
    paddd      xmm3, xmm4        // add 16 results
    paddd      xmm1, xmm2
    sub        ecx, 16
    paddd      xmm1, xmm3

    pshufd     xmm2, xmm1, 0x0e  // upper 2 dwords
    paddd      xmm1, xmm2
    pshufd     xmm2, xmm1, 0x01
    paddd      xmm1, xmm2
    paddd      xmm0, xmm1
    jg         wloop

    movd       eax, xmm0         // return hash
    ret
  }
}
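
// For reference, a minimal scalar sketch (illustrative, not from the original
// file) of the djb2 hash that HashDjb2_SSE41 above and HashDjb2_AVX2 below
// vectorize 16 bytes at a time. Folding 16 steps of hash = hash * 33 + src[i]
// gives hash * 33^16 plus each byte scaled by a falling power of 33, which is
// why kHash16x33 and the kHashMul0..3 tables hold 33^16 down to 33^0.
static uint32 HashDjb2_C_Sketch(const uint8* src, int count, uint32 seed) {
  uint32 hash = seed;
  for (int i = 0; i < count; ++i) {
    hash = hash * 33u + src[i];
  }
  return hash;
}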

// Visual C 2012 required for AVX2.
#if _MSC_VER >= 1700
__declspec(naked) __declspec(align(16))
uint32 HashDjb2_AVX2(const uint8* src, int count, uint32 seed) {
  __asm {
    mov        eax, [esp + 4]    // src
    mov        ecx, [esp + 8]    // count
    movd       xmm0, [esp + 12]  // seed
    movdqa     xmm6, kHash16x33

    align      4
  wloop:
    vpmovzxbd  xmm3, dword ptr [eax]  // src[0-3]
    pmulld     xmm0, xmm6  // hash *= 33 ^ 16
    vpmovzxbd  xmm4, dword ptr [eax + 4]  // src[4-7]
    pmulld     xmm3, kHashMul0
    vpmovzxbd  xmm2, dword ptr [eax + 8]  // src[8-11]
    pmulld     xmm4, kHashMul1
    vpmovzxbd  xmm1, dword ptr [eax + 12]  // src[12-15]
    pmulld     xmm2, kHashMul2
    lea        eax, [eax + 16]
    pmulld     xmm1, kHashMul3
    paddd      xmm3, xmm4  // add 16 results
    paddd      xmm1, xmm2
    sub        ecx, 16
    paddd      xmm1, xmm3
    pshufd     xmm2, xmm1, 0x0e  // upper 2 dwords
    paddd      xmm1, xmm2
    pshufd     xmm2, xmm1, 0x01
    paddd      xmm1, xmm2
    paddd      xmm0, xmm1
    jg         wloop

    movd       eax, xmm0  // return hash
    ret
  }
}
#endif  // _MSC_VER >= 1700

#endif  // !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && defined(_MSC_VER)

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif
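
// Usage sketch (illustrative, not from the original file): these kernels
// consume whole vectors per iteration, so count must be a positive multiple
// of 16 bytes (32 for SumSquareError_AVX2), and SumSquareError_SSE2 loads
// with movdqa, so both buffers must be 16-byte aligned, e.g. via the
// SIMD_ALIGNED macro from row.h:
//
//   SIMD_ALIGNED(uint8 a[256]);
//   SIMD_ALIGNED(uint8 b[256]);
//   // ... fill a and b ...
//   uint32 sse = libyuv::SumSquareError_SSE2(a, b, 256);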