/*
 * Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>
#include <time.h>

#include "libyuv/compare.h"
#include "libyuv/convert.h"
#include "libyuv/convert_argb.h"
#include "libyuv/convert_from.h"
#include "libyuv/convert_from_argb.h"
#include "libyuv/cpu_id.h"
#include "libyuv/format_conversion.h"
#ifdef HAVE_JPEG
#include "libyuv/mjpeg_decoder.h"
#endif
#include "libyuv/planar_functions.h"
#include "libyuv/rotate.h"
#include "libyuv/row.h"
#include "../unit_test/unit_test.h"

#if defined(_MSC_VER)
#define SIMD_ALIGNED(var) __declspec(align(16)) var
#else  // __GNUC__
#define SIMD_ALIGNED(var) var __attribute__((aligned(16)))
#endif

namespace libyuv {

#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))

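// TESTPLANARTOPI defines a planar-to-planar conversion test: the source
// planes are filled with random data, the conversion is run once with all CPU
// optimizations masked off (C reference) and again fully optimized, and the
// resulting Y, U and V planes are compared within a small tolerance.
// TESTPLANARTOP instantiates the _Any, _Unaligned, _Invert and _Opt variants.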
#define TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                       FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  align_buffer_64(src_y, kWidth * kHeight + OFF); \
  align_buffer_64(src_u, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
  align_buffer_64(src_v, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
  align_buffer_64(dst_y_c, kWidth * kHeight); \
  align_buffer_64(dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                           SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                           SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_y_opt, kWidth * kHeight); \
  align_buffer_64(dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                             SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                             SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kWidth; ++j) \
      src_y[(i * kWidth) + j + OFF] = (random() & 0xff); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
      src_u[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
          (random() & 0xff); \
      src_v[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
          (random() & 0xff); \
    } \
  } \
  memset(dst_y_c, 1, kWidth * kHeight); \
  memset(dst_u_c, 2, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                     SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_c, 3, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                     SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_opt, 101, kWidth * kHeight); \
  memset(dst_u_opt, 102, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_opt, 103, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  MaskCpuFlags(0); \
  SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                 src_u + OFF, \
                                 SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                 src_v + OFF, \
                                 SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                 dst_y_c, kWidth, \
                                 dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                 dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                 kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                   src_u + OFF, \
                                   SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                   src_v + OFF, \
                                   SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                   dst_y_opt, kWidth, \
                                   dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                   dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                   kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth; ++j) { \
      int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
                         static_cast<int>(dst_y_opt[i * kWidth + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 0); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = abs( \
          static_cast<int>(dst_u_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
          static_cast<int>(dst_u_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 3); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = abs( \
          static_cast<int>(dst_v_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
          static_cast<int>(dst_v_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 3); \
  free_aligned_buffer_64(dst_y_c); \
  free_aligned_buffer_64(dst_u_c); \
  free_aligned_buffer_64(dst_v_c); \
  free_aligned_buffer_64(dst_y_opt); \
  free_aligned_buffer_64(dst_u_opt); \
  free_aligned_buffer_64(dst_v_opt); \
  free_aligned_buffer_64(src_y); \
  free_aligned_buffer_64(src_u); \
  free_aligned_buffer_64(src_v); \
}

#define TESTPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                      FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
    TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                   FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_ - 4, _Any, +, 0) \
    TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                   FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, _Unaligned, +, 1) \
    TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                   FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, _Invert, -, 0) \
    TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                   FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, _Opt, +, 0)

TESTPLANARTOP(I420, 2, 2, I420, 2, 2)
TESTPLANARTOP(I422, 2, 1, I420, 2, 2)
TESTPLANARTOP(I444, 1, 1, I420, 2, 2)
TESTPLANARTOP(I411, 4, 1, I420, 2, 2)
TESTPLANARTOP(I420, 2, 2, I422, 2, 1)
TESTPLANARTOP(I420, 2, 2, I444, 1, 1)
TESTPLANARTOP(I420, 2, 2, I411, 4, 1)
TESTPLANARTOP(I420, 2, 2, I420Mirror, 2, 2)
TESTPLANARTOP(I422, 2, 1, I422, 2, 1)
TESTPLANARTOP(I444, 1, 1, I444, 1, 1)

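// TESTPLANARTOBPI covers planar to biplanar conversions (e.g. I420 to NV12):
// separate U and V source planes are converted into an interleaved UV plane,
// and the C and optimized outputs are compared.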
#define TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                        FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  align_buffer_64(src_y, kWidth * kHeight + OFF); \
  align_buffer_64(src_u, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
  align_buffer_64(src_v, SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
  align_buffer_64(dst_y_c, kWidth * kHeight); \
  align_buffer_64(dst_uv_c, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
                            SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_y_opt, kWidth * kHeight); \
  align_buffer_64(dst_uv_opt, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
                              SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kWidth; ++j) \
      src_y[(i * kWidth) + j + OFF] = (random() & 0xff); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
      src_u[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
          (random() & 0xff); \
      src_v[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
          (random() & 0xff); \
    } \
  } \
  memset(dst_y_c, 1, kWidth * kHeight); \
  memset(dst_uv_c, 2, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
                      SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_opt, 101, kWidth * kHeight); \
  memset(dst_uv_opt, 102, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
                          SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  MaskCpuFlags(0); \
  SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                 src_u + OFF, \
                                 SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                 src_v + OFF, \
                                 SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                 dst_y_c, kWidth, \
                                 dst_uv_c, SUBSAMPLE(kWidth * 2, SUBSAMP_X), \
                                 kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                   src_u + OFF, \
                                   SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                   src_v + OFF, \
                                   SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                   dst_y_opt, kWidth, \
                                   dst_uv_opt, \
                                   SUBSAMPLE(kWidth * 2, SUBSAMP_X), \
                                   kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth; ++j) { \
      int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
                         static_cast<int>(dst_y_opt[i * kWidth + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 1); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth * 2, SUBSAMP_X); ++j) { \
      int abs_diff = abs( \
          static_cast<int>(dst_uv_c[i * SUBSAMPLE(kWidth * 2, SUBSAMP_X) + j]) - \
          static_cast<int>(dst_uv_opt[i * SUBSAMPLE(kWidth * 2, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 1); \
  free_aligned_buffer_64(dst_y_c); \
  free_aligned_buffer_64(dst_uv_c); \
  free_aligned_buffer_64(dst_y_opt); \
  free_aligned_buffer_64(dst_uv_opt); \
  free_aligned_buffer_64(src_y); \
  free_aligned_buffer_64(src_u); \
  free_aligned_buffer_64(src_v); \
}

#define TESTPLANARTOBP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                       FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
    TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                    FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                    benchmark_width_ - 4, _Any, +, 0) \
    TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                    FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                    benchmark_width_, _Unaligned, +, 1) \
    TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                    FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                    benchmark_width_, _Invert, -, 0) \
    TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                    FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                    benchmark_width_, _Opt, +, 0)

TESTPLANARTOBP(I420, 2, 2, NV12, 2, 2)
TESTPLANARTOBP(I420, 2, 2, NV21, 2, 2)

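// TESTBIPLANARTOPI covers the reverse direction: a biplanar source (Y plus
// interleaved UV) is converted to three separate planes, comparing the C and
// optimized code paths.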
#define TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                         FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  align_buffer_64(src_y, kWidth * kHeight + OFF); \
  align_buffer_64(src_uv, 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
                          SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
  align_buffer_64(dst_y_c, kWidth * kHeight); \
  align_buffer_64(dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                           SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                           SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_y_opt, kWidth * kHeight); \
  align_buffer_64(dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                             SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                             SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kWidth; ++j) \
      src_y[(i * kWidth) + j + OFF] = (random() & 0xff); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
    for (int j = 0; j < 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
      src_uv[(i * 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
          (random() & 0xff); \
    } \
  } \
  memset(dst_y_c, 1, kWidth * kHeight); \
  memset(dst_u_c, 2, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                     SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_c, 3, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                     SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_opt, 101, kWidth * kHeight); \
  memset(dst_u_opt, 102, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_opt, 103, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  MaskCpuFlags(0); \
  SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                 src_uv + OFF, \
                                 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                 dst_y_c, kWidth, \
                                 dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                 dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                 kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                   src_uv + OFF, \
                                   2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                   dst_y_opt, kWidth, \
                                   dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                   dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                   kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth; ++j) { \
      int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
                         static_cast<int>(dst_y_opt[i * kWidth + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 1); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = abs( \
          static_cast<int>(dst_u_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
          static_cast<int>(dst_u_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 1); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = abs( \
          static_cast<int>(dst_v_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
          static_cast<int>(dst_v_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 1); \
  free_aligned_buffer_64(dst_y_c); \
  free_aligned_buffer_64(dst_u_c); \
  free_aligned_buffer_64(dst_v_c); \
  free_aligned_buffer_64(dst_y_opt); \
  free_aligned_buffer_64(dst_u_opt); \
  free_aligned_buffer_64(dst_v_opt); \
  free_aligned_buffer_64(src_y); \
  free_aligned_buffer_64(src_uv); \
}

#define TESTBIPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                        FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
    TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                     FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_ - 4, _Any, +, 0) \
    TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                     FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Unaligned, +, 1) \
    TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                     FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Invert, -, 0) \
    TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                     FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Opt, +, 0)

TESTBIPLANARTOP(NV12, 2, 2, I420, 2, 2)
TESTBIPLANARTOP(NV21, 2, 2, I420, 2, 2)

#define ALIGNINT(V, ALIGN) (((V) + (ALIGN) - 1) / (ALIGN) * (ALIGN))

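// TESTPLANARTOBI converts planar YUV to a packed format FMT_B, then converts
// both the C and optimized results to FMT_C (normally ARGB) so that packed
// formats such as RGB565 can be compared byte by byte within DIFF.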
#define TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                       YALIGN, W1280, DIFF, N, NEG, OFF, FMT_C, BPP_C) \
TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
  const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN); \
  const int kSizeUV = \
      SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y); \
  align_buffer_64(src_y, kWidth * kHeight + OFF); \
  align_buffer_64(src_u, kSizeUV + OFF); \
  align_buffer_64(src_v, kSizeUV + OFF); \
  align_buffer_64(dst_argb_c, kStrideB * kHeight); \
  align_buffer_64(dst_argb_opt, kStrideB * kHeight); \
  srandom(time(NULL)); \
  for (int i = 0; i < kWidth * kHeight; ++i) { \
    src_y[i + OFF] = (random() & 0xff); \
  } \
  for (int i = 0; i < kSizeUV; ++i) { \
    src_u[i + OFF] = (random() & 0xff); \
    src_v[i + OFF] = (random() & 0xff); \
  } \
  memset(dst_argb_c, 1, kStrideB * kHeight); \
  memset(dst_argb_opt, 101, kStrideB * kHeight); \
  MaskCpuFlags(0); \
  FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
                        src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                        src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                        dst_argb_c, kStrideB, \
                        kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
                          src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                          src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                          dst_argb_opt, kStrideB, \
                          kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  /* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
  align_buffer_64(dst_argb32_c, kWidth * BPP_C * kHeight); \
  align_buffer_64(dst_argb32_opt, kWidth * BPP_C * kHeight); \
  memset(dst_argb32_c, 2, kWidth * BPP_C * kHeight); \
  memset(dst_argb32_opt, 102, kWidth * BPP_C * kHeight); \
  FMT_B##To##FMT_C(dst_argb_c, kStrideB, \
                   dst_argb32_c, kWidth * BPP_C, \
                   kWidth, kHeight); \
  FMT_B##To##FMT_C(dst_argb_opt, kStrideB, \
                   dst_argb32_opt, kWidth * BPP_C, \
                   kWidth, kHeight); \
  for (int i = 0; i < kWidth * BPP_C * kHeight; ++i) { \
    int abs_diff = abs(static_cast<int>(dst_argb32_c[i]) - \
                       static_cast<int>(dst_argb32_opt[i])); \
    if (abs_diff > max_diff) { \
      max_diff = abs_diff; \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  free_aligned_buffer_64(src_y); \
  free_aligned_buffer_64(src_u); \
  free_aligned_buffer_64(src_v); \
  free_aligned_buffer_64(dst_argb_c); \
  free_aligned_buffer_64(dst_argb_opt); \
  free_aligned_buffer_64(dst_argb32_c); \
  free_aligned_buffer_64(dst_argb32_opt); \
}

#define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                      YALIGN, DIFF, FMT_C, BPP_C) \
    TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                   YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, FMT_C, BPP_C) \
    TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                   YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, FMT_C, BPP_C) \
    TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                   YALIGN, benchmark_width_, DIFF, _Invert, -, 0, FMT_C, BPP_C) \
    TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                   YALIGN, benchmark_width_, DIFF, _Opt, +, 0, FMT_C, BPP_C)

// TODO(fbarchard): Make vertical alignment unnecessary on bayer.
TESTPLANARTOB(I420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BGRA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGBA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RAW, 3, 3, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGB24, 3, 3, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGB565, 2, 2, 1, 9, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ARGB1555, 2, 2, 1, 9, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ARGB4444, 2, 2, 1, 17, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, BGRA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, RGBA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I411, 4, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I444, 1, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, YUY2, 2, 4, 1, 1, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, UYVY, 2, 4, 1, 1, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, YUY2, 2, 4, 1, 0, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, UYVY, 2, 4, 1, 0, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, I400, 1, 1, 1, 0, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BayerBGGR, 1, 2, 2, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BayerRGGB, 1, 2, 2, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BayerGBRG, 1, 2, 2, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BayerGRBG, 1, 2, 2, 2, ARGB, 4)

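// TESTBIPLANARTOBI converts a biplanar source (e.g. NV12) to a packed format,
// expanding both results to ARGB before comparison.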
#define TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
                         W1280, DIFF, N, NEG, OFF) \
TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  const int kStrideB = kWidth * BPP_B; \
  align_buffer_64(src_y, kWidth * kHeight + OFF); \
  align_buffer_64(src_uv, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                          SUBSAMPLE(kHeight, SUBSAMP_Y) * 2 + OFF); \
  align_buffer_64(dst_argb_c, kStrideB * kHeight); \
  align_buffer_64(dst_argb_opt, kStrideB * kHeight); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kWidth; ++j) \
      src_y[(i * kWidth) + j + OFF] = (random() & 0xff); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X) * 2; ++j) { \
      src_uv[(i * SUBSAMPLE(kWidth, SUBSAMP_X)) * 2 + j + OFF] = \
          (random() & 0xff); \
    } \
  } \
  memset(dst_argb_c, 1, kStrideB * kHeight); \
  memset(dst_argb_opt, 101, kStrideB * kHeight); \
  MaskCpuFlags(0); \
  FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
                        src_uv + OFF, SUBSAMPLE(kWidth, SUBSAMP_X) * 2, \
                        dst_argb_c, kWidth * BPP_B, \
                        kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
                          src_uv + OFF, SUBSAMPLE(kWidth, SUBSAMP_X) * 2, \
                          dst_argb_opt, kWidth * BPP_B, \
                          kWidth, NEG kHeight); \
  } \
  /* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
  align_buffer_64(dst_argb32_c, kWidth * 4 * kHeight); \
  align_buffer_64(dst_argb32_opt, kWidth * 4 * kHeight); \
  memset(dst_argb32_c, 2, kWidth * 4 * kHeight); \
  memset(dst_argb32_opt, 102, kWidth * 4 * kHeight); \
  FMT_B##ToARGB(dst_argb_c, kStrideB, \
                dst_argb32_c, kWidth * 4, \
                kWidth, kHeight); \
  FMT_B##ToARGB(dst_argb_opt, kStrideB, \
                dst_argb32_opt, kWidth * 4, \
                kWidth, kHeight); \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth * 4; ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_argb32_c[i * kWidth * 4 + j]) - \
              static_cast<int>(dst_argb32_opt[i * kWidth * 4 + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  free_aligned_buffer_64(src_y); \
  free_aligned_buffer_64(src_uv); \
  free_aligned_buffer_64(dst_argb_c); \
  free_aligned_buffer_64(dst_argb_opt); \
  free_aligned_buffer_64(dst_argb32_c); \
  free_aligned_buffer_64(dst_argb32_opt); \
}

#define TESTBIPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, DIFF) \
    TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
                     benchmark_width_ - 4, DIFF, _Any, +, 0) \
    TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
                     benchmark_width_, DIFF, _Unaligned, +, 1) \
    TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
                     benchmark_width_, DIFF, _Invert, -, 0) \
    TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
                     benchmark_width_, DIFF, _Opt, +, 0)

TESTBIPLANARTOB(NV12, 2, 2, ARGB, 4, 2)
TESTBIPLANARTOB(NV21, 2, 2, ARGB, 4, 2)
TESTBIPLANARTOB(NV12, 2, 2, RGB565, 2, 9)
TESTBIPLANARTOB(NV21, 2, 2, RGB565, 2, 9)

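// TESTATOPLANARI converts a packed format FMT_A to planar YUV and compares
// the C and optimized Y, U and V planes within DIFF.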
#define TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                       W1280, DIFF, N, NEG, OFF) \
TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
  const int kStride = \
      (SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMP_X * 8 * BPP_A + 7) / 8; \
  align_buffer_64(src_argb, kStride * kHeight + OFF); \
  align_buffer_64(dst_y_c, kWidth * kHeight); \
  align_buffer_64(dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                           SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                           SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_y_opt, kWidth * kHeight); \
  align_buffer_64(dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                             SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                             SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_c, 1, kWidth * kHeight); \
  memset(dst_u_c, 2, \
         SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_c, 3, \
         SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_opt, 101, kWidth * kHeight); \
  memset(dst_u_opt, 102, \
         SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_opt, 103, \
         SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kStride; ++j) \
      src_argb[(i * kStride) + j + OFF] = (random() & 0xff); \
  MaskCpuFlags(0); \
  FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
                        dst_y_c, kWidth, \
                        dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                        dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                        kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
                          dst_y_opt, kWidth, \
                          dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                          dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                          kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth; ++j) { \
      int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
                         static_cast<int>(dst_y_opt[i * kWidth + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = abs( \
          static_cast<int>(dst_u_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
          static_cast<int>(dst_u_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = abs( \
          static_cast<int>(dst_v_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
          static_cast<int>(dst_v_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  free_aligned_buffer_64(dst_y_c); \
  free_aligned_buffer_64(dst_u_c); \
  free_aligned_buffer_64(dst_v_c); \
  free_aligned_buffer_64(dst_y_opt); \
  free_aligned_buffer_64(dst_u_opt); \
  free_aligned_buffer_64(dst_v_opt); \
  free_aligned_buffer_64(src_argb); \
}

#define TESTATOPLANAR(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                      DIFF) \
    TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_ - 4, DIFF, _Any, +, 0) \
    TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, DIFF, _Unaligned, +, 1) \
    TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, DIFF, _Invert, -, 0) \
    TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, DIFF, _Opt, +, 0)

TESTATOPLANAR(ARGB, 4, 1, I420, 2, 2, 4)
#ifdef __arm__
TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 4)
#else
TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 0)
#endif
TESTATOPLANAR(BGRA, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(ABGR, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGBA, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(RAW, 3, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGB24, 3, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGB565, 2, 1, I420, 2, 2, 5)
// TODO(fbarchard): Make 1555 neon work same as C code, reduce to diff 9.
TESTATOPLANAR(ARGB1555, 2, 1, I420, 2, 2, 15)
TESTATOPLANAR(ARGB4444, 2, 1, I420, 2, 2, 17)
TESTATOPLANAR(ARGB, 4, 1, I411, 4, 1, 4)
TESTATOPLANAR(ARGB, 4, 1, I422, 2, 1, 2)
TESTATOPLANAR(ARGB, 4, 1, I444, 1, 1, 2)
TESTATOPLANAR(YUY2, 2, 1, I420, 2, 2, 2)
TESTATOPLANAR(UYVY, 2, 1, I420, 2, 2, 2)
TESTATOPLANAR(YUY2, 2, 1, I422, 2, 1, 2)
TESTATOPLANAR(UYVY, 2, 1, I422, 2, 1, 2)
TESTATOPLANAR(I400, 1, 1, I420, 2, 2, 2)
TESTATOPLANAR(BayerBGGR, 1, 2, I420, 2, 2, 4)
TESTATOPLANAR(BayerRGGB, 1, 2, I420, 2, 2, 4)
TESTATOPLANAR(BayerGBRG, 1, 2, I420, 2, 2, 4)
TESTATOPLANAR(BayerGRBG, 1, 2, I420, 2, 2, 4)

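// TESTATOBIPLANARI converts a packed format to a biplanar (Y plus interleaved
// UV) format and compares the C and optimized outputs.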
#define TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                         W1280, N, NEG, OFF) \
TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  const int kStride = (kWidth * 8 * BPP_A + 7) / 8; \
  align_buffer_64(src_argb, kStride * kHeight + OFF); \
  align_buffer_64(dst_y_c, kWidth * kHeight); \
  align_buffer_64(dst_uv_c, SUBSAMPLE(kWidth, SUBSAMP_X) * 2 * \
                            SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_y_opt, kWidth * kHeight); \
  align_buffer_64(dst_uv_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * 2 * \
                              SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kStride; ++j) \
      src_argb[(i * kStride) + j + OFF] = (random() & 0xff); \
  memset(dst_y_c, 1, kWidth * kHeight); \
  memset(dst_uv_c, 2, SUBSAMPLE(kWidth, SUBSAMP_X) * 2 * \
                      SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_opt, 101, kWidth * kHeight); \
  memset(dst_uv_opt, 102, SUBSAMPLE(kWidth, SUBSAMP_X) * 2 * \
                          SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  MaskCpuFlags(0); \
  FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
                        dst_y_c, kWidth, \
                        dst_uv_c, SUBSAMPLE(kWidth, SUBSAMP_X) * 2, \
                        kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
                          dst_y_opt, kWidth, \
                          dst_uv_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * 2, \
                          kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth; ++j) { \
      int abs_diff = abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
                         static_cast<int>(dst_y_opt[i * kWidth + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 4); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X) * 2; ++j) { \
      int abs_diff = abs( \
          static_cast<int>(dst_uv_c[i * SUBSAMPLE(kWidth, SUBSAMP_X) * 2 + j]) - \
          static_cast<int>(dst_uv_opt[i * SUBSAMPLE(kWidth, SUBSAMP_X) * 2 + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 4); \
  free_aligned_buffer_64(dst_y_c); \
  free_aligned_buffer_64(dst_uv_c); \
  free_aligned_buffer_64(dst_y_opt); \
  free_aligned_buffer_64(dst_uv_opt); \
  free_aligned_buffer_64(src_argb); \
}

#define TESTATOBIPLANAR(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
    TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_ - 4, _Any, +, 0) \
    TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Unaligned, +, 1) \
    TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Invert, -, 0) \
    TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Opt, +, 0)

TESTATOBIPLANAR(ARGB, 4, NV12, 2, 2)
TESTATOBIPLANAR(ARGB, 4, NV21, 2, 2)

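// TESTATOBI converts between two packed formats and compares the raw output
// buffers. TESTATOBRANDOM repeats the comparison over random small sizes, and
// TESTATOB instantiates the _Any, _Unaligned, _Invert, _Opt and _Random tests.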
#define TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
                  FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
                  W1280, DIFF, N, NEG, OFF) \
TEST_F(libyuvTest, FMT_A##To##FMT_B##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
  const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
  const int kStrideA = (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
  const int kStrideB = (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
  align_buffer_64(src_argb, kStrideA * kHeightA + OFF); \
  align_buffer_64(dst_argb_c, kStrideB * kHeightB); \
  align_buffer_64(dst_argb_opt, kStrideB * kHeightB); \
  srandom(time(NULL)); \
  for (int i = 0; i < kStrideA * kHeightA; ++i) { \
    src_argb[i + OFF] = (random() & 0xff); \
  } \
  memset(dst_argb_c, 1, kStrideB * kHeightB); \
  memset(dst_argb_opt, 101, kStrideB * kHeightB); \
  MaskCpuFlags(0); \
  FMT_A##To##FMT_B(src_argb + OFF, kStrideA, \
                   dst_argb_c, kStrideB, \
                   kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_A##To##FMT_B(src_argb + OFF, kStrideA, \
                     dst_argb_opt, kStrideB, \
                     kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kStrideB * kHeightB; ++i) { \
    int abs_diff = abs(static_cast<int>(dst_argb_c[i]) - \
                       static_cast<int>(dst_argb_opt[i])); \
    if (abs_diff > max_diff) { \
      max_diff = abs_diff; \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  free_aligned_buffer_64(src_argb); \
  free_aligned_buffer_64(dst_argb_c); \
  free_aligned_buffer_64(dst_argb_opt); \
}

#define TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
                       FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF) \
TEST_F(libyuvTest, FMT_A##To##FMT_B##_Random) { \
  srandom(time(NULL)); \
  for (int times = 0; times < benchmark_iterations_; ++times) { \
    const int kWidth = (random() & 63) + 1; \
    const int kHeight = (random() & 31) + 1; \
    const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
    const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
    const int kStrideA = (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
    const int kStrideB = (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
    align_buffer_page_end(src_argb, kStrideA * kHeightA); \
    align_buffer_page_end(dst_argb_c, kStrideB * kHeightB); \
    align_buffer_page_end(dst_argb_opt, kStrideB * kHeightB); \
    for (int i = 0; i < kStrideA * kHeightA; ++i) { \
      src_argb[i] = (random() & 0xff); \
    } \
    memset(dst_argb_c, 123, kStrideB * kHeightB); \
    memset(dst_argb_opt, 123, kStrideB * kHeightB); \
    MaskCpuFlags(0); \
    FMT_A##To##FMT_B(src_argb, kStrideA, \
                     dst_argb_c, kStrideB, \
                     kWidth, kHeight); \
    MaskCpuFlags(-1); \
    FMT_A##To##FMT_B(src_argb, kStrideA, \
                     dst_argb_opt, kStrideB, \
                     kWidth, kHeight); \
    int max_diff = 0; \
    for (int i = 0; i < kStrideB * kHeightB; ++i) { \
      int abs_diff = abs(static_cast<int>(dst_argb_c[i]) - \
                         static_cast<int>(dst_argb_opt[i])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
    EXPECT_LE(max_diff, DIFF); \
    free_aligned_buffer_page_end(src_argb); \
    free_aligned_buffer_page_end(dst_argb_c); \
    free_aligned_buffer_page_end(dst_argb_opt); \
  } \
}

#define TESTATOB(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
                 FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF) \
    TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
              FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
              benchmark_width_ - 4, DIFF, _Any, +, 0) \
    TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
              FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
              benchmark_width_, DIFF, _Unaligned, +, 1) \
    TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
              FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
              benchmark_width_, DIFF, _Invert, -, 0) \
    TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
              FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
              benchmark_width_, DIFF, _Opt, +, 0) \
    TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
                   FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF)

TESTATOB(ARGB, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BGRA, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ABGR, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RGBA, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RAW, 3, 3, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RGB24, 3, 3, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RGB565, 2, 2, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ARGB1555, 2, 2, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ARGB4444, 2, 2, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BayerBGGR, 1, 1, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BayerRGGB, 1, 1, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BayerGBRG, 1, 1, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BayerGRBG, 1, 1, 1, 0)
TESTATOB(ARGB, 4, 4, 1, YUY2, 2, 4, 1, 4)
TESTATOB(ARGB, 4, 4, 1, UYVY, 2, 4, 1, 4)
TESTATOB(ARGB, 4, 4, 1, I400, 1, 1, 1, 2)
TESTATOB(ARGB, 4, 4, 1, J400, 1, 1, 1, 2)
TESTATOB(BGRA, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ABGR, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RGBA, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RAW, 3, 3, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RGB24, 3, 3, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RGB565, 2, 2, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB1555, 2, 2, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB4444, 2, 2, 1, ARGB, 4, 4, 1, 0)
TESTATOB(YUY2, 2, 4, 1, ARGB, 4, 4, 1, 4)
TESTATOB(UYVY, 2, 4, 1, ARGB, 4, 4, 1, 4)
TESTATOB(BayerBGGR, 1, 2, 2, ARGB, 4, 4, 1, 0)
TESTATOB(BayerRGGB, 1, 2, 2, ARGB, 4, 4, 1, 0)
TESTATOB(BayerGBRG, 1, 2, 2, ARGB, 4, 4, 1, 0)
TESTATOB(BayerGRBG, 1, 2, 2, ARGB, 4, 4, 1, 0)
TESTATOB(I400, 1, 1, 1, ARGB, 4, 4, 1, 0)
TESTATOB(I400, 1, 1, 1, I400, 1, 1, 1, 0)
TESTATOB(I400, 1, 1, 1, I400Mirror, 1, 1, 1, 0)
TESTATOB(Y, 1, 1, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ARGBMirror, 4, 4, 1, 0)

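// Remaining tests: a checksum test for ARGBToRGB565 and, when libjpeg is
// available, tests for the JPEG SOI/EOI marker validation in ValidateJpeg().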
TEST_F(libyuvTest, Test565) {
  SIMD_ALIGNED(uint8 orig_pixels[256][4]);
  SIMD_ALIGNED(uint8 pixels565[256][2]);

  for (int i = 0; i < 256; ++i) {
    for (int j = 0; j < 4; ++j) {
      orig_pixels[i][j] = i;
    }
  }
  ARGBToRGB565(&orig_pixels[0][0], 0, &pixels565[0][0], 0, 256, 1);
  uint32 checksum = HashDjb2(&pixels565[0][0], sizeof(pixels565), 5381);
  EXPECT_EQ(610919429u, checksum);
}

#ifdef HAVE_JPEG
TEST_F(libyuvTest, ValidateJpeg) {
  const int kOff = 10;
  const int kMinJpeg = 64;
  const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg ?
      benchmark_width_ * benchmark_height_ : kMinJpeg;
  const int kSize = kImageSize + kOff;
  align_buffer_64(orig_pixels, kSize);

  // No SOI or EOI. Expect fail.
  memset(orig_pixels, 0, kSize);

  // EOI, SOI. Expect pass.
  orig_pixels[0] = 0xff;
  orig_pixels[1] = 0xd8;  // SOI.
  orig_pixels[kSize - kOff + 0] = 0xff;
  orig_pixels[kSize - kOff + 1] = 0xd9;  // EOI.
  for (int times = 0; times < benchmark_iterations_; ++times) {
    EXPECT_TRUE(ValidateJpeg(orig_pixels, kSize));
  }
  free_aligned_buffer_page_end(orig_pixels);
}

TEST_F(libyuvTest, InvalidateJpeg) {
  const int kOff = 10;
  const int kMinJpeg = 64;
  const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg ?
      benchmark_width_ * benchmark_height_ : kMinJpeg;
  const int kSize = kImageSize + kOff;
  align_buffer_64(orig_pixels, kSize);

  // No SOI or EOI. Expect fail.
  memset(orig_pixels, 0, kSize);
  EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));

  // SOI but no EOI. Expect fail.
  orig_pixels[0] = 0xff;
  orig_pixels[1] = 0xd8;  // SOI.
  for (int times = 0; times < benchmark_iterations_; ++times) {
    EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
  }
  // EOI but no SOI. Expect fail.
  orig_pixels[0] = 0;
  orig_pixels[1] = 0;
  orig_pixels[kSize - kOff + 0] = 0xff;
  orig_pixels[kSize - kOff + 1] = 0xd9;  // EOI.
  EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));

  free_aligned_buffer_page_end(orig_pixels);
}

#endif

}  // namespace libyuv