/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/compare.h"

#include <float.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "libyuv/basic_types.h"
#include "libyuv/cpu_id.h"
#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// hash seed of 5381 recommended.
// Internal C version of HashDjb2 with int sized count for efficiency.
uint32 HashDjb2_C(const uint8* src, int count, uint32 seed);

// This module is for Visual C x86.
#if !defined(LIBYUV_DISABLE_X86) && \
    (defined(_M_IX86) || \
    (defined(__x86_64__) || (defined(__i386__) && !defined(__pic__))))
#define HAS_HASHDJB2_SSE41
uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed);

#if _MSC_VER >= 1700
#define HAS_HASHDJB2_AVX2
uint32 HashDjb2_AVX2(const uint8* src, int count, uint32 seed);
#endif

#endif  // HAS_HASHDJB2_SSE41
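// All HashDjb2_* variants are expected to compute the same Bernstein (DJB2)
// recurrence over the input bytes,
//   seed = seed * 33 + src[i],
// carrying the running seed between calls. That is why HashDjb2() below can
// hash a buffer in blocks and still match a single pass over the whole buffer.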
// hash seed of 5381 recommended.
LIBYUV_API
uint32 HashDjb2(const uint8* src, uint64 count, uint32 seed) {
  const int kBlockSize = 1 << 15;  // 32768;
  int remainder;
  uint32 (*HashDjb2_SSE)(const uint8* src, int count, uint32 seed) = HashDjb2_C;
#if defined(HAS_HASHDJB2_SSE41)
  if (TestCpuFlag(kCpuHasSSE41)) {
    HashDjb2_SSE = HashDjb2_SSE41;
  }
#endif
#if defined(HAS_HASHDJB2_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    HashDjb2_SSE = HashDjb2_AVX2;
  }
#endif

  while (count >= (uint64)(kBlockSize)) {
    seed = HashDjb2_SSE(src, kBlockSize, seed);
    src += kBlockSize;
    count -= kBlockSize;
  }
  remainder = (int)(count) & ~15;
  if (remainder) {
    seed = HashDjb2_SSE(src, remainder, seed);
    src += remainder;
    count -= remainder;
  }
  remainder = (int)(count) & 15;
  if (remainder) {
    seed = HashDjb2_C(src, remainder, seed);
  }
  return seed;
}

uint32 SumSquareError_C(const uint8* src_a, const uint8* src_b, int count);
#if !defined(LIBYUV_DISABLE_NEON) && \
    (defined(__ARM_NEON__) || defined(LIBYUV_NEON))
#define HAS_SUMSQUAREERROR_NEON
uint32 SumSquareError_NEON(const uint8* src_a, const uint8* src_b, int count);
#endif
#if !defined(LIBYUV_DISABLE_X86) && \
    (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__))
#define HAS_SUMSQUAREERROR_SSE2
uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count);
#endif
// Visual C 2012 required for AVX2.
#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && _MSC_VER >= 1700
#define HAS_SUMSQUAREERROR_AVX2
uint32 SumSquareError_AVX2(const uint8* src_a, const uint8* src_b, int count);
#endif

// TODO(fbarchard): Refactor into row function.
LIBYUV_API
uint64 ComputeSumSquareError(const uint8* src_a, const uint8* src_b,
                             int count) {
  // SumSquareError returns values 0 to 65535 for each squared difference.
  // Up to 65536 of those can be summed and remain within a uint32.
  // After each block of 65536 pixels, accumulate into a uint64.
  const int kBlockSize = 65536;
  int remainder = count & (kBlockSize - 1) & ~31;
  uint64 sse = 0;
  int i;
  uint32 (*SumSquareError)(const uint8* src_a, const uint8* src_b, int count) =
      SumSquareError_C;
#if defined(HAS_SUMSQUAREERROR_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    SumSquareError = SumSquareError_NEON;
  }
#endif
#if defined(HAS_SUMSQUAREERROR_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) &&
      IS_ALIGNED(src_a, 16) && IS_ALIGNED(src_b, 16)) {
    // Note only used for multiples of 16 so count is not checked.
    SumSquareError = SumSquareError_SSE2;
  }
#endif
#if defined(HAS_SUMSQUAREERROR_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    // Note only used for multiples of 32 so count is not checked.
    SumSquareError = SumSquareError_AVX2;
  }
#endif
#ifdef _OPENMP
#pragma omp parallel for reduction(+: sse)
#endif
  for (i = 0; i < (count - (kBlockSize - 1)); i += kBlockSize) {
    sse += SumSquareError(src_a + i, src_b + i, kBlockSize);
  }
  src_a += count & ~(kBlockSize - 1);
  src_b += count & ~(kBlockSize - 1);
  if (remainder) {
    sse += SumSquareError(src_a, src_b, remainder);
    src_a += remainder;
    src_b += remainder;
  }
  remainder = count & 31;
  if (remainder) {
    sse += SumSquareError_C(src_a, src_b, remainder);
  }
  return sse;
}

LIBYUV_API
uint64 ComputeSumSquareErrorPlane(const uint8* src_a, int stride_a,
                                  const uint8* src_b, int stride_b,
                                  int width, int height) {
  uint64 sse = 0;
  int h;
  // Coalesce rows.
  if (stride_a == width &&
      stride_b == width) {
    width *= height;
    height = 1;
    stride_a = stride_b = 0;
  }
  for (h = 0; h < height; ++h) {
    sse += ComputeSumSquareError(src_a, src_b, width);
    src_a += stride_a;
    src_b += stride_b;
  }
  return sse;
}
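// PSNR is derived from the accumulated square error as
//   PSNR = 10 * log10(255^2 * count / sse)
// and clamped to kMaxPsnr when the planes are identical (sse == 0). As a
// rough reference point, a mean square error of 1.0 corresponds to about
// 48.13 dB.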
LIBYUV_API
double SumSquareErrorToPsnr(uint64 sse, uint64 count) {
  double psnr;
  if (sse > 0) {
    double mse = (double)(count) / (double)(sse);
    psnr = 10.0 * log10(255.0 * 255.0 * mse);
  } else {
    psnr = kMaxPsnr;  // Limit to prevent divide by 0
  }

  if (psnr > kMaxPsnr)
    psnr = kMaxPsnr;

  return psnr;
}

LIBYUV_API
double CalcFramePsnr(const uint8* src_a, int stride_a,
                     const uint8* src_b, int stride_b,
                     int width, int height) {
  const uint64 samples = width * height;
  const uint64 sse = ComputeSumSquareErrorPlane(src_a, stride_a,
                                                src_b, stride_b,
                                                width, height);
  return SumSquareErrorToPsnr(sse, samples);
}

LIBYUV_API
double I420Psnr(const uint8* src_y_a, int stride_y_a,
                const uint8* src_u_a, int stride_u_a,
                const uint8* src_v_a, int stride_v_a,
                const uint8* src_y_b, int stride_y_b,
                const uint8* src_u_b, int stride_u_b,
                const uint8* src_v_b, int stride_v_b,
                int width, int height) {
  const uint64 sse_y = ComputeSumSquareErrorPlane(src_y_a, stride_y_a,
                                                  src_y_b, stride_y_b,
                                                  width, height);
  const int width_uv = (width + 1) >> 1;
  const int height_uv = (height + 1) >> 1;
  const uint64 sse_u = ComputeSumSquareErrorPlane(src_u_a, stride_u_a,
                                                  src_u_b, stride_u_b,
                                                  width_uv, height_uv);
  const uint64 sse_v = ComputeSumSquareErrorPlane(src_v_a, stride_v_a,
                                                  src_v_b, stride_v_b,
                                                  width_uv, height_uv);
  const uint64 samples = width * height + 2 * (width_uv * height_uv);
  const uint64 sse = sse_y + sse_u + sse_v;
  return SumSquareErrorToPsnr(sse, samples);
}
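// SSIM stabilizing constants. The usual definition takes c1 = (k1 * L)^2 and
// c2 = (k2 * L)^2 with k1 = 0.01, k2 = 0.03 and L = 255. Ssim8x8_C below
// works with 8x8 pixel sums rather than means, so the constants are
// pre-scaled by 64^2 (the squared pixel count of the window):
//   cc1 = 64^2 * (0.01 * 255)^2 ~= 26634
//   cc2 = 64^2 * (0.03 * 255)^2 ~= 239708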
static const int64 cc1 =  26634;  // 64^2 * (0.01 * 255)^2
static const int64 cc2 = 239708;  // 64^2 * (0.03 * 255)^2

static double Ssim8x8_C(const uint8* src_a, int stride_a,
                        const uint8* src_b, int stride_b) {
  int64 sum_a = 0;
  int64 sum_b = 0;
  int64 sum_sq_a = 0;
  int64 sum_sq_b = 0;
  int64 sum_axb = 0;

  int i;
  for (i = 0; i < 8; ++i) {
    int j;
    for (j = 0; j < 8; ++j) {
      sum_a += src_a[j];
      sum_b += src_b[j];
      sum_sq_a += src_a[j] * src_a[j];
      sum_sq_b += src_b[j] * src_b[j];
      sum_axb += src_a[j] * src_b[j];
    }

    src_a += stride_a;
    src_b += stride_b;
  }

  {
    const int64 count = 64;
    // Scale the constants by the number of pixels.
    const int64 c1 = (cc1 * count * count) >> 12;
    const int64 c2 = (cc2 * count * count) >> 12;

    const int64 sum_a_x_sum_b = sum_a * sum_b;

    const int64 ssim_n = (2 * sum_a_x_sum_b + c1) *
                         (2 * count * sum_axb - 2 * sum_a_x_sum_b + c2);

    const int64 sum_a_sq = sum_a * sum_a;
    const int64 sum_b_sq = sum_b * sum_b;

    const int64 ssim_d = (sum_a_sq + sum_b_sq + c1) *
                         (count * sum_sq_a - sum_a_sq +
                          count * sum_sq_b - sum_b_sq + c2);

    if (ssim_d == 0) {
      return DBL_MAX;
    }
    return ssim_n * 1.0 / ssim_d;
  }
}

// We use an 8x8 moving window, with the starting location of each window on a
// 4x4 pixel grid. This arrangement allows the windows to overlap block
// boundaries to penalize blocking artifacts.
LIBYUV_API
double CalcFrameSsim(const uint8* src_a, int stride_a,
                     const uint8* src_b, int stride_b,
                     int width, int height) {
  int samples = 0;
  double ssim_total = 0;
  double (*Ssim8x8)(const uint8* src_a, int stride_a,
                    const uint8* src_b, int stride_b) = Ssim8x8_C;

  // Sample points start at each 4x4 location.
  int i;
  for (i = 0; i < height - 8; i += 4) {
    int j;
    for (j = 0; j < width - 8; j += 4) {
      ssim_total += Ssim8x8(src_a + j, stride_a, src_b + j, stride_b);
      samples++;
    }

    src_a += stride_a * 4;
    src_b += stride_b * 4;
  }

  ssim_total /= samples;
  return ssim_total;
}

LIBYUV_API
double I420Ssim(const uint8* src_y_a, int stride_y_a,
                const uint8* src_u_a, int stride_u_a,
                const uint8* src_v_a, int stride_v_a,
                const uint8* src_y_b, int stride_y_b,
                const uint8* src_u_b, int stride_u_b,
                const uint8* src_v_b, int stride_v_b,
                int width, int height) {
  const double ssim_y = CalcFrameSsim(src_y_a, stride_y_a,
                                      src_y_b, stride_y_b, width, height);
  const int width_uv = (width + 1) >> 1;
  const int height_uv = (height + 1) >> 1;
  const double ssim_u = CalcFrameSsim(src_u_a, stride_u_a,
                                      src_u_b, stride_u_b,
                                      width_uv, height_uv);
  const double ssim_v = CalcFrameSsim(src_v_a, stride_v_a,
                                      src_v_b, stride_v_b,
                                      width_uv, height_uv);
  return ssim_y * 0.8 + 0.1 * (ssim_u + ssim_v);
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif
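// Example usage (illustrative sketch only; the frame pointers, strides, width
// and height are placeholders supplied by the caller):
//   double psnr = I420Psnr(y_a, y_stride, u_a, uv_stride, v_a, uv_stride,
//                          y_b, y_stride, u_b, uv_stride, v_b, uv_stride,
//                          width, height);
//   double ssim = I420Ssim(y_a, y_stride, u_a, uv_stride, v_a, uv_stride,
//                          y_b, y_stride, u_b, uv_stride, v_b, uv_stride,
//                          width, height);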