michael@0: /* michael@0: * Copyright 2013 The LibYuv Project Authors. All rights reserved. michael@0: * michael@0: * Use of this source code is governed by a BSD-style license michael@0: * that can be found in the LICENSE file in the root of the source michael@0: * tree. An additional intellectual property rights grant can be found michael@0: * in the file PATENTS. All contributing project authors may michael@0: * be found in the AUTHORS file in the root of the source tree. michael@0: */ michael@0: michael@0: #include "./psnr.h" // NOLINT michael@0: michael@0: #include michael@0: michael@0: #ifdef _OPENMP michael@0: #include michael@0: #endif michael@0: #ifdef _MSC_VER michael@0: #include // For __cpuid() michael@0: #endif michael@0: michael@0: #ifdef __cplusplus michael@0: extern "C" { michael@0: #endif michael@0: michael@0: typedef unsigned int uint32; // NOLINT michael@0: #ifdef _MSC_VER michael@0: typedef unsigned __int64 uint64; michael@0: #else // COMPILER_MSVC michael@0: #if defined(__LP64__) && !defined(__OpenBSD__) && !defined(__APPLE__) michael@0: typedef unsigned long uint64; // NOLINT michael@0: #else // defined(__LP64__) && !defined(__OpenBSD__) && !defined(__APPLE__) michael@0: typedef unsigned long long uint64; // NOLINT michael@0: #endif // __LP64__ michael@0: #endif // _MSC_VER michael@0: michael@0: // PSNR formula: psnr = 10 * log10 (Peak Signal^2 * size / sse) michael@0: double ComputePSNR(double sse, double size) { michael@0: const double kMINSSE = 255.0 * 255.0 * size / pow(10., kMaxPSNR / 10.); michael@0: if (sse <= kMINSSE) michael@0: sse = kMINSSE; // Produces max PSNR of 128 michael@0: return 10.0 * log10(65025.0 * size / sse); michael@0: } michael@0: michael@0: #if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) michael@0: #define HAS_SUMSQUAREERROR_NEON michael@0: static uint32 SumSquareError_NEON(const uint8* src_a, michael@0: const uint8* src_b, int count) { michael@0: volatile uint32 sse; michael@0: asm volatile ( // NOLINT michael@0: "vmov.u8 q7, #0 \n" michael@0: "vmov.u8 q9, #0 \n" michael@0: "vmov.u8 q8, #0 \n" michael@0: "vmov.u8 q10, #0 \n" michael@0: michael@0: "1: \n" michael@0: "vld1.u8 {q0}, [%0]! \n" michael@0: "vld1.u8 {q1}, [%1]! \n" michael@0: "vsubl.u8 q2, d0, d2 \n" michael@0: "vsubl.u8 q3, d1, d3 \n" michael@0: "vmlal.s16 q7, d4, d4 \n" michael@0: "vmlal.s16 q8, d6, d6 \n" michael@0: "vmlal.s16 q8, d5, d5 \n" michael@0: "vmlal.s16 q10, d7, d7 \n" michael@0: "subs %2, %2, #16 \n" michael@0: "bhi 1b \n" michael@0: michael@0: "vadd.u32 q7, q7, q8 \n" michael@0: "vadd.u32 q9, q9, q10 \n" michael@0: "vadd.u32 q10, q7, q9 \n" michael@0: "vpaddl.u32 q1, q10 \n" michael@0: "vadd.u64 d0, d2, d3 \n" michael@0: "vmov.32 %3, d0[0] \n" michael@0: : "+r"(src_a), michael@0: "+r"(src_b), michael@0: "+r"(count), michael@0: "=r"(sse) michael@0: : michael@0: : "memory", "cc", "q0", "q1", "q2", "q3", "q7", "q8", "q9", "q10"); michael@0: return sse; michael@0: } michael@0: #elif !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && defined(_MSC_VER) michael@0: #define HAS_SUMSQUAREERROR_SSE2 michael@0: __declspec(naked) michael@0: static uint32 SumSquareError_SSE2(const uint8* /*src_a*/, michael@0: const uint8* /*src_b*/, int /*count*/) { michael@0: __asm { michael@0: mov eax, [esp + 4] // src_a michael@0: mov edx, [esp + 8] // src_b michael@0: mov ecx, [esp + 12] // count michael@0: pxor xmm0, xmm0 michael@0: pxor xmm5, xmm5 michael@0: sub edx, eax michael@0: michael@0: wloop: michael@0: movdqu xmm1, [eax] michael@0: movdqu xmm2, [eax + edx] michael@0: lea eax, [eax + 16] michael@0: movdqu xmm3, xmm1 michael@0: psubusb xmm1, xmm2 michael@0: psubusb xmm2, xmm3 michael@0: por xmm1, xmm2 michael@0: movdqu xmm2, xmm1 michael@0: punpcklbw xmm1, xmm5 michael@0: punpckhbw xmm2, xmm5 michael@0: pmaddwd xmm1, xmm1 michael@0: pmaddwd xmm2, xmm2 michael@0: paddd xmm0, xmm1 michael@0: paddd xmm0, xmm2 michael@0: sub ecx, 16 michael@0: ja wloop michael@0: michael@0: pshufd xmm1, xmm0, 0EEh michael@0: paddd xmm0, xmm1 michael@0: pshufd xmm1, xmm0, 01h michael@0: paddd xmm0, xmm1 michael@0: movd eax, xmm0 michael@0: ret michael@0: } michael@0: } michael@0: #elif !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__)) michael@0: #define HAS_SUMSQUAREERROR_SSE2 michael@0: static uint32 SumSquareError_SSE2(const uint8* src_a, michael@0: const uint8* src_b, int count) { michael@0: uint32 sse; michael@0: asm volatile ( // NOLINT michael@0: "pxor %%xmm0,%%xmm0 \n" michael@0: "pxor %%xmm5,%%xmm5 \n" michael@0: "sub %0,%1 \n" michael@0: michael@0: "1: \n" michael@0: "movdqu (%0),%%xmm1 \n" michael@0: "movdqu (%0,%1,1),%%xmm2 \n" michael@0: "lea 0x10(%0),%0 \n" michael@0: "movdqu %%xmm1,%%xmm3 \n" michael@0: "psubusb %%xmm2,%%xmm1 \n" michael@0: "psubusb %%xmm3,%%xmm2 \n" michael@0: "por %%xmm2,%%xmm1 \n" michael@0: "movdqu %%xmm1,%%xmm2 \n" michael@0: "punpcklbw %%xmm5,%%xmm1 \n" michael@0: "punpckhbw %%xmm5,%%xmm2 \n" michael@0: "pmaddwd %%xmm1,%%xmm1 \n" michael@0: "pmaddwd %%xmm2,%%xmm2 \n" michael@0: "paddd %%xmm1,%%xmm0 \n" michael@0: "paddd %%xmm2,%%xmm0 \n" michael@0: "sub $0x10,%2 \n" michael@0: "ja 1b \n" michael@0: michael@0: "pshufd $0xee,%%xmm0,%%xmm1 \n" michael@0: "paddd %%xmm1,%%xmm0 \n" michael@0: "pshufd $0x1,%%xmm0,%%xmm1 \n" michael@0: "paddd %%xmm1,%%xmm0 \n" michael@0: "movd %%xmm0,%3 \n" michael@0: michael@0: : "+r"(src_a), // %0 michael@0: "+r"(src_b), // %1 michael@0: "+r"(count), // %2 michael@0: "=g"(sse) // %3 michael@0: : michael@0: : "memory", "cc" michael@0: #if defined(__SSE2__) michael@0: , "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" michael@0: #endif michael@0: ); // NOLINT michael@0: return sse; michael@0: } michael@0: #endif // LIBYUV_DISABLE_X86 etc michael@0: michael@0: #if defined(HAS_SUMSQUAREERROR_SSE2) michael@0: #if (defined(__pic__) || defined(__APPLE__)) && defined(__i386__) michael@0: static __inline void __cpuid(int cpu_info[4], int info_type) { michael@0: asm volatile ( // NOLINT michael@0: "mov %%ebx, %%edi \n" michael@0: "cpuid \n" michael@0: "xchg %%edi, %%ebx \n" michael@0: : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) michael@0: : "a"(info_type)); michael@0: } michael@0: #elif defined(__i386__) || defined(__x86_64__) michael@0: static __inline void __cpuid(int cpu_info[4], int info_type) { michael@0: asm volatile ( // NOLINT michael@0: "cpuid \n" michael@0: : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) michael@0: : "a"(info_type)); michael@0: } michael@0: #endif michael@0: michael@0: static int CpuHasSSE2() { michael@0: #if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) michael@0: int cpu_info[4]; michael@0: __cpuid(cpu_info, 1); michael@0: if (cpu_info[3] & 0x04000000) { michael@0: return 1; michael@0: } michael@0: #endif michael@0: return 0; michael@0: } michael@0: #endif // HAS_SUMSQUAREERROR_SSE2 michael@0: michael@0: static uint32 SumSquareError_C(const uint8* src_a, michael@0: const uint8* src_b, int count) { michael@0: uint32 sse = 0u; michael@0: for (int x = 0; x < count; ++x) { michael@0: int diff = src_a[x] - src_b[x]; michael@0: sse += static_cast(diff * diff); michael@0: } michael@0: return sse; michael@0: } michael@0: michael@0: double ComputeSumSquareError(const uint8* src_a, michael@0: const uint8* src_b, int count) { michael@0: uint32 (*SumSquareError)(const uint8* src_a, michael@0: const uint8* src_b, int count) = SumSquareError_C; michael@0: #if defined(HAS_SUMSQUAREERROR_NEON) michael@0: SumSquareError = SumSquareError_NEON; michael@0: #endif michael@0: #if defined(HAS_SUMSQUAREERROR_SSE2) michael@0: if (CpuHasSSE2()) { michael@0: SumSquareError = SumSquareError_SSE2; michael@0: } michael@0: #endif michael@0: const int kBlockSize = 1 << 15; michael@0: uint64 sse = 0; michael@0: #ifdef _OPENMP michael@0: #pragma omp parallel for reduction(+: sse) michael@0: #endif michael@0: for (int i = 0; i < (count - (kBlockSize - 1)); i += kBlockSize) { michael@0: sse += SumSquareError(src_a + i, src_b + i, kBlockSize); michael@0: } michael@0: src_a += count & ~(kBlockSize - 1); michael@0: src_b += count & ~(kBlockSize - 1); michael@0: int remainder = count & (kBlockSize - 1) & ~15; michael@0: if (remainder) { michael@0: sse += SumSquareError(src_a, src_b, remainder); michael@0: src_a += remainder; michael@0: src_b += remainder; michael@0: } michael@0: remainder = count & 15; michael@0: if (remainder) { michael@0: sse += SumSquareError_C(src_a, src_b, remainder); michael@0: } michael@0: return static_cast(sse); michael@0: } michael@0: michael@0: #ifdef __cplusplus michael@0: } // extern "C" michael@0: #endif