media/libyuv/source/compare_win.cc

changeset 0:6474c204b198
new file: b/media/libyuv/source/compare_win.cc (Wed Dec 31 06:09:35 2014 +0100)
/*
 *  Copyright 2012 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/basic_types.h"
#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && defined(_MSC_VER)

__declspec(naked) __declspec(align(16))
uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count) {
  __asm {
    mov        eax, [esp + 4]    // src_a
    mov        edx, [esp + 8]    // src_b
    mov        ecx, [esp + 12]   // count
    pxor       xmm0, xmm0
    pxor       xmm5, xmm5

    align      4
  wloop:
    movdqa     xmm1, [eax]
    lea        eax,  [eax + 16]
    movdqa     xmm2, [edx]
    lea        edx,  [edx + 16]
    sub        ecx, 16
    movdqa     xmm3, xmm1  // abs trick
    psubusb    xmm1, xmm2
    psubusb    xmm2, xmm3
    por        xmm1, xmm2
    movdqa     xmm2, xmm1
    punpcklbw  xmm1, xmm5
    punpckhbw  xmm2, xmm5
    pmaddwd    xmm1, xmm1
    pmaddwd    xmm2, xmm2
    paddd      xmm0, xmm1
    paddd      xmm0, xmm2
    jg         wloop

    pshufd     xmm1, xmm0, 0xee
    paddd      xmm0, xmm1
    pshufd     xmm1, xmm0, 0x01
    paddd      xmm0, xmm1
    movd       eax, xmm0
    ret
  }
}
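
// For reference: SumSquareError_SSE2 above and SumSquareError_AVX2 below both
// return a plain sum of squared byte differences.  A scalar C sketch of the
// same computation (the name SumSquareError_C_Sketch is illustrative only; the
// SIMD versions additionally require count to be a multiple of 16 or 32 and,
// for the movdqa loads, 16-byte-aligned pointers):
//
//   static uint32 SumSquareError_C_Sketch(const uint8* src_a,
//                                         const uint8* src_b, int count) {
//     uint32 sse = 0u;
//     for (int i = 0; i < count; ++i) {
//       int diff = src_a[i] - src_b[i];     // difference in [-255, 255]
//       sse += (uint32)(diff * diff);       // accumulate squared difference
//     }
//     return sse;
//   }
//
// The SIMD loops get |a - b| with two saturating subtracts (psubusb) OR'd
// together, widen the bytes to 16 bits against a zero register, then use
// pmaddwd to square and pairwise-add into 32-bit lanes that the trailing
// pshufd/paddd sequence reduces to a single dword.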

// Visual C 2012 required for AVX2.
#if _MSC_VER >= 1700
// C4752: found Intel(R) Advanced Vector Extensions; consider using /arch:AVX.
#pragma warning(disable: 4752)
__declspec(naked) __declspec(align(16))
uint32 SumSquareError_AVX2(const uint8* src_a, const uint8* src_b, int count) {
  __asm {
    mov        eax, [esp + 4]    // src_a
    mov        edx, [esp + 8]    // src_b
    mov        ecx, [esp + 12]   // count
    vpxor      ymm0, ymm0, ymm0  // sum
    vpxor      ymm5, ymm5, ymm5  // constant 0 for unpck
    sub        edx, eax

    align      4
  wloop:
    vmovdqu    ymm1, [eax]
    vmovdqu    ymm2, [eax + edx]
    lea        eax,  [eax + 32]
    sub        ecx, 32
    vpsubusb   ymm3, ymm1, ymm2  // abs difference trick
    vpsubusb   ymm2, ymm2, ymm1
    vpor       ymm1, ymm2, ymm3
    vpunpcklbw ymm2, ymm1, ymm5  // u16.  mutates order.
    vpunpckhbw ymm1, ymm1, ymm5
    vpmaddwd   ymm2, ymm2, ymm2  // square + hadd to u32.
    vpmaddwd   ymm1, ymm1, ymm1
    vpaddd     ymm0, ymm0, ymm1
    vpaddd     ymm0, ymm0, ymm2
    jg         wloop

    vpshufd    ymm1, ymm0, 0xee  // 3, 2 + 1, 0 both lanes.
    vpaddd     ymm0, ymm0, ymm1
    vpshufd    ymm1, ymm0, 0x01  // 1 + 0 both lanes.
    vpaddd     ymm0, ymm0, ymm1
    vpermq     ymm1, ymm0, 0x02  // high + low lane.
    vpaddd     ymm0, ymm0, ymm1
    vmovd      eax, xmm0
    vzeroupper
    ret
  }
}
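
// Note: the AVX2 loop above computes src_b - src_a once (sub edx, eax) and
// then reads src_b as [eax + edx], so a single lea advances both streams;
// vmovdqu tolerates unaligned pointers, unlike the movdqa loads in the SSE2
// version, but count must still be a multiple of 32.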
#endif  // _MSC_VER >= 1700

#define HAS_HASHDJB2_SSE41
static uvec32 kHash16x33 = { 0x92d9e201, 0, 0, 0 };  // 33 ^ 16
static uvec32 kHashMul0 = {
  0x0c3525e1,  // 33 ^ 15
  0xa3476dc1,  // 33 ^ 14
  0x3b4039a1,  // 33 ^ 13
  0x4f5f0981,  // 33 ^ 12
};
static uvec32 kHashMul1 = {
  0x30f35d61,  // 33 ^ 11
  0x855cb541,  // 33 ^ 10
  0x040a9121,  // 33 ^ 9
  0x747c7101,  // 33 ^ 8
};
static uvec32 kHashMul2 = {
  0xec41d4e1,  // 33 ^ 7
  0x4cfa3cc1,  // 33 ^ 6
  0x025528a1,  // 33 ^ 5
  0x00121881,  // 33 ^ 4
};
static uvec32 kHashMul3 = {
  0x00008c61,  // 33 ^ 3
  0x00000441,  // 33 ^ 2
  0x00000021,  // 33 ^ 1
  0x00000001,  // 33 ^ 0
};
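
// These tables let the hash loops below fold 16 bytes per iteration: one loop
// step computes, modulo 2^32,
//   hash = hash * 33^16 + src[0] * 33^15 + src[1] * 33^14 + ... + src[15],
// which equals applying the scalar djb2 step hash = hash * 33 + src[i]
// sixteen times.  kHash16x33 holds the 33^16 factor and kHashMul0..kHashMul3
// hold the per-byte weights 33^15..33^0, four dwords at a time (e.g.
// 33^2 = 1089 = 0x441 and 33^4 = 1185921 = 0x121881, matching the entries
// above).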

// 27: 66 0F 38 40 C6     pmulld      xmm0,xmm6
// 44: 66 0F 38 40 DD     pmulld      xmm3,xmm5
// 59: 66 0F 38 40 E5     pmulld      xmm4,xmm5
// 72: 66 0F 38 40 D5     pmulld      xmm2,xmm5
// 83: 66 0F 38 40 CD     pmulld      xmm1,xmm5
#define pmulld(reg) _asm _emit 0x66 _asm _emit 0x0F _asm _emit 0x38 \
    _asm _emit 0x40 _asm _emit reg
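// pmulld(reg) hand-emits the SSE4.1 pmulld opcode (66 0F 38 40 /r) so the code
// assembles even where the inline assembler does not know the mnemonic; 'reg'
// is the ModRM byte, e.g. pmulld(0xc6) encodes pmulld xmm0, xmm6 and
// pmulld(0xcd) encodes pmulld xmm1, xmm5, as in the disassembly listing above.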

__declspec(naked) __declspec(align(16))
uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed) {
  __asm {
    mov        eax, [esp + 4]    // src
    mov        ecx, [esp + 8]    // count
    movd       xmm0, [esp + 12]  // seed

    pxor       xmm7, xmm7        // constant 0 for unpck
    movdqa     xmm6, kHash16x33

    align      4
  wloop:
    movdqu     xmm1, [eax]       // src[0-15]
    lea        eax, [eax + 16]
    pmulld(0xc6)                 // pmulld      xmm0,xmm6  hash *= 33 ^ 16
    movdqa     xmm5, kHashMul0
    movdqa     xmm2, xmm1
    punpcklbw  xmm2, xmm7        // src[0-7]
    movdqa     xmm3, xmm2
    punpcklwd  xmm3, xmm7        // src[0-3]
    pmulld(0xdd)                 // pmulld     xmm3, xmm5
    movdqa     xmm5, kHashMul1
    movdqa     xmm4, xmm2
    punpckhwd  xmm4, xmm7        // src[4-7]
    pmulld(0xe5)                 // pmulld     xmm4, xmm5
    movdqa     xmm5, kHashMul2
    punpckhbw  xmm1, xmm7        // src[8-15]
    movdqa     xmm2, xmm1
    punpcklwd  xmm2, xmm7        // src[8-11]
    pmulld(0xd5)                 // pmulld     xmm2, xmm5
    movdqa     xmm5, kHashMul3
    punpckhwd  xmm1, xmm7        // src[12-15]
    pmulld(0xcd)                 // pmulld     xmm1, xmm5
    paddd      xmm3, xmm4        // add 16 results
    paddd      xmm1, xmm2
    sub        ecx, 16
    paddd      xmm1, xmm3

    pshufd     xmm2, xmm1, 0x0e  // upper 2 dwords
    paddd      xmm1, xmm2
    pshufd     xmm2, xmm1, 0x01
    paddd      xmm1, xmm2
    paddd      xmm0, xmm1
    jg         wloop

    movd       eax, xmm0         // return hash
    ret
  }
}
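
// HashDjb2_SSE41 above and HashDjb2_AVX2 below both compute the classic djb2
// hash of count bytes, seeded by the caller.  A scalar C sketch of the value
// being produced (the name HashDjb2_C_Sketch is illustrative only; the SIMD
// versions additionally require count to be a multiple of 16):
//
//   static uint32 HashDjb2_C_Sketch(const uint8* src, int count, uint32 seed) {
//     uint32 hash = seed;
//     for (int i = 0; i < count; ++i) {
//       hash = hash * 33u + src[i];    // djb2 step, wrapping mod 2^32
//     }
//     return hash;
//   }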

// Visual C 2012 required for AVX2.
#if _MSC_VER >= 1700
__declspec(naked) __declspec(align(16))
uint32 HashDjb2_AVX2(const uint8* src, int count, uint32 seed) {
  __asm {
    mov        eax, [esp + 4]    // src
    mov        ecx, [esp + 8]    // count
    movd       xmm0, [esp + 12]  // seed
    movdqa     xmm6, kHash16x33

    align      4
  wloop:
    vpmovzxbd  xmm3, dword ptr [eax]  // src[0-3]
    pmulld     xmm0, xmm6  // hash *= 33 ^ 16
    vpmovzxbd  xmm4, dword ptr [eax + 4]  // src[4-7]
    pmulld     xmm3, kHashMul0
    vpmovzxbd  xmm2, dword ptr [eax + 8]  // src[8-11]
    pmulld     xmm4, kHashMul1
    vpmovzxbd  xmm1, dword ptr [eax + 12]  // src[12-15]
    pmulld     xmm2, kHashMul2
    lea        eax, [eax + 16]
    pmulld     xmm1, kHashMul3
    paddd      xmm3, xmm4        // add 16 results
    paddd      xmm1, xmm2
    sub        ecx, 16
    paddd      xmm1, xmm3
    pshufd     xmm2, xmm1, 0x0e  // upper 2 dwords
    paddd      xmm1, xmm2
    pshufd     xmm2, xmm1, 0x01
    paddd      xmm1, xmm2
    paddd      xmm0, xmm1
    jg         wloop

    movd       eax, xmm0         // return hash
    ret
  }
}
#endif  // _MSC_VER >= 1700

#endif  // !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && defined(_MSC_VER)

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif
