media/libyuv/unit_test/convert_test.cc

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Wed, 31 Dec 2014 06:09:35 +0100
changeset:   0:6474c204b198
permissions: -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>
#include <time.h>

#include "libyuv/compare.h"
#include "libyuv/convert.h"
#include "libyuv/convert_argb.h"
#include "libyuv/convert_from.h"
#include "libyuv/convert_from_argb.h"
#include "libyuv/cpu_id.h"
#include "libyuv/format_conversion.h"
#ifdef HAVE_JPEG
#include "libyuv/mjpeg_decoder.h"
#endif
#include "libyuv/planar_functions.h"
#include "libyuv/rotate.h"
#include "libyuv/row.h"
#include "../unit_test/unit_test.h"

#if defined(_MSC_VER)
#define SIMD_ALIGNED(var) __declspec(align(16)) var
#else  // __GNUC__
#define SIMD_ALIGNED(var) var __attribute__((aligned(16)))
#endif

namespace libyuv {

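// SUBSAMPLE(v, a) rounds v up before dividing by a (ceiling division), so a
// subsampled chroma plane covers the full image even when the width or height
// is not an exact multiple of the subsampling factor.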
#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))

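// Planar-to-planar tests: run the conversion once with all CPU optimizations
// masked off (C reference) and once with them enabled, then compare the Y, U
// and V output planes. The variants generated below differ as follows: _Any
// uses a width 4 less than the benchmark width, _Unaligned offsets the source
// pointers by one byte, _Invert passes a negative height, and _Opt uses the
// default size.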
#define TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                       FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  align_buffer_64(src_y, kWidth * kHeight + OFF); \
  align_buffer_64(src_u, \
                  SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
  align_buffer_64(src_v, \
                  SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
  align_buffer_64(dst_y_c, kWidth * kHeight); \
  align_buffer_64(dst_u_c, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_c, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_y_opt, kWidth * kHeight); \
  align_buffer_64(dst_u_opt, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_opt, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kWidth; ++j) \
      src_y[(i * kWidth) + j + OFF] = (random() & 0xff); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
      src_u[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
          (random() & 0xff); \
      src_v[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
          (random() & 0xff); \
    } \
  } \
  memset(dst_y_c, 1, kWidth * kHeight); \
  memset(dst_u_c, 2, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                     SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_c, 3, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                     SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_opt, 101, kWidth * kHeight); \
  memset(dst_u_opt, 102, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_opt, 103, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  MaskCpuFlags(0); \
  SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                 src_u + OFF, \
                                 SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                 src_v + OFF, \
                                 SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                 dst_y_c, kWidth, \
                                 dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                 dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                 kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                   src_u + OFF, \
                                   SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                   src_v + OFF, \
                                   SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                   dst_y_opt, kWidth, \
                                   dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                   dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                   kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth; ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
              static_cast<int>(dst_y_opt[i * kWidth + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 0); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_u_c[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
              static_cast<int>(dst_u_opt[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 3); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_v_c[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
              static_cast<int>(dst_v_opt[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 3); \
  free_aligned_buffer_64(dst_y_c); \
  free_aligned_buffer_64(dst_u_c); \
  free_aligned_buffer_64(dst_v_c); \
  free_aligned_buffer_64(dst_y_opt); \
  free_aligned_buffer_64(dst_u_opt); \
  free_aligned_buffer_64(dst_v_opt); \
  free_aligned_buffer_64(src_y); \
  free_aligned_buffer_64(src_u); \
  free_aligned_buffer_64(src_v); \
}

#define TESTPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                      FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
    TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                   FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_ - 4, _Any, +, 0) \
    TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                   FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, _Unaligned, +, 1) \
    TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                   FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, _Invert, -, 0) \
    TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                   FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, _Opt, +, 0)

TESTPLANARTOP(I420, 2, 2, I420, 2, 2)
TESTPLANARTOP(I422, 2, 1, I420, 2, 2)
TESTPLANARTOP(I444, 1, 1, I420, 2, 2)
TESTPLANARTOP(I411, 4, 1, I420, 2, 2)
TESTPLANARTOP(I420, 2, 2, I422, 2, 1)
TESTPLANARTOP(I420, 2, 2, I444, 1, 1)
TESTPLANARTOP(I420, 2, 2, I411, 4, 1)
TESTPLANARTOP(I420, 2, 2, I420Mirror, 2, 2)
TESTPLANARTOP(I422, 2, 1, I422, 2, 1)
TESTPLANARTOP(I444, 1, 1, I444, 1, 1)

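// Planar-to-biplanar tests (e.g. I420 to NV12/NV21): same C-vs-optimized
// comparison, but the destination chroma is a single interleaved UV plane.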
#define TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                        FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  align_buffer_64(src_y, kWidth * kHeight + OFF); \
  align_buffer_64(src_u, \
                  SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
  align_buffer_64(src_v, \
                  SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
  align_buffer_64(dst_y_c, kWidth * kHeight); \
  align_buffer_64(dst_uv_c, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
                            SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_y_opt, kWidth * kHeight); \
  align_buffer_64(dst_uv_opt, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
                              SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kWidth; ++j) \
      src_y[(i * kWidth) + j + OFF] = (random() & 0xff); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
      src_u[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
          (random() & 0xff); \
      src_v[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
          (random() & 0xff); \
    } \
  } \
  memset(dst_y_c, 1, kWidth * kHeight); \
  memset(dst_uv_c, 2, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
                      SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_opt, 101, kWidth * kHeight); \
  memset(dst_uv_opt, 102, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
                          SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  MaskCpuFlags(0); \
  SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                 src_u + OFF, \
                                 SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                 src_v + OFF, \
                                 SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                 dst_y_c, kWidth, \
                                 dst_uv_c, SUBSAMPLE(kWidth * 2, SUBSAMP_X), \
                                 kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                   src_u + OFF, \
                                   SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                   src_v + OFF, \
                                   SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                   dst_y_opt, kWidth, \
                                   dst_uv_opt, \
                                   SUBSAMPLE(kWidth * 2, SUBSAMP_X), \
                                   kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth; ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
              static_cast<int>(dst_y_opt[i * kWidth + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 1); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth * 2, SUBSAMP_X); ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_uv_c[i * \
                               SUBSAMPLE(kWidth * 2, SUBSAMP_X) + j]) - \
              static_cast<int>(dst_uv_opt[i * \
                               SUBSAMPLE(kWidth * 2, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 1); \
  free_aligned_buffer_64(dst_y_c); \
  free_aligned_buffer_64(dst_uv_c); \
  free_aligned_buffer_64(dst_y_opt); \
  free_aligned_buffer_64(dst_uv_opt); \
  free_aligned_buffer_64(src_y); \
  free_aligned_buffer_64(src_u); \
  free_aligned_buffer_64(src_v); \
}

#define TESTPLANARTOBP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                       FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
    TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                    FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                    benchmark_width_ - 4, _Any, +, 0) \
    TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                    FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                    benchmark_width_, _Unaligned, +, 1) \
    TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                    FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                    benchmark_width_, _Invert, -, 0) \
    TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                    FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                    benchmark_width_, _Opt, +, 0)

TESTPLANARTOBP(I420, 2, 2, NV12, 2, 2)
TESTPLANARTOBP(I420, 2, 2, NV21, 2, 2)

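// Biplanar-to-planar tests (NV12/NV21 to I420): the interleaved UV source is
// split back into separate U and V planes, and the C and optimized results
// for each plane must agree to within one.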
#define TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                         FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
TEST_F(libyuvTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  align_buffer_64(src_y, kWidth * kHeight + OFF); \
  align_buffer_64(src_uv, 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
                          SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
  align_buffer_64(dst_y_c, kWidth * kHeight); \
  align_buffer_64(dst_u_c, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_c, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_y_opt, kWidth * kHeight); \
  align_buffer_64(dst_u_opt, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_opt, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kWidth; ++j) \
      src_y[(i * kWidth) + j + OFF] = (random() & 0xff); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
    for (int j = 0; j < 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
      src_uv[(i * 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
          (random() & 0xff); \
    } \
  } \
  memset(dst_y_c, 1, kWidth * kHeight); \
  memset(dst_u_c, 2, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                     SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_c, 3, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                     SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_opt, 101, kWidth * kHeight); \
  memset(dst_u_opt, 102, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_opt, 103, SUBSAMPLE(kWidth, SUBSAMP_X) * \
                         SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  MaskCpuFlags(0); \
  SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                 src_uv + OFF, \
                                 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                 dst_y_c, kWidth, \
                                 dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                 dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                 kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
                                   src_uv + OFF, \
                                   2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
                                   dst_y_opt, kWidth, \
                                   dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                   dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                                   kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth; ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
              static_cast<int>(dst_y_opt[i * kWidth + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 1); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_u_c[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
              static_cast<int>(dst_u_opt[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 1); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_v_c[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
              static_cast<int>(dst_v_opt[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 1); \
  free_aligned_buffer_64(dst_y_c); \
  free_aligned_buffer_64(dst_u_c); \
  free_aligned_buffer_64(dst_v_c); \
  free_aligned_buffer_64(dst_y_opt); \
  free_aligned_buffer_64(dst_u_opt); \
  free_aligned_buffer_64(dst_v_opt); \
  free_aligned_buffer_64(src_y); \
  free_aligned_buffer_64(src_uv); \
}

#define TESTBIPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                        FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
    TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                     FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_ - 4, _Any, +, 0) \
    TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                     FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Unaligned, +, 1) \
    TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                     FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Invert, -, 0) \
    TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
                     FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Opt, +, 0)

TESTBIPLANARTOP(NV12, 2, 2, I420, 2, 2)
TESTBIPLANARTOP(NV21, 2, 2, I420, 2, 2)

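// Planar-to-packed tests (e.g. I420 to ARGB, RGB565, Bayer): after the C and
// optimized conversions, both results are converted to FMT_C (ARGB in all the
// instantiations below) so that packed sub-byte formats such as RGB565 can be
// compared byte by byte. ALIGNINT rounds a value up to a multiple of ALIGN.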
#define ALIGNINT(V, ALIGN) (((V) + (ALIGN) - 1) / (ALIGN) * (ALIGN))

#define TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                       YALIGN, W1280, DIFF, N, NEG, OFF, FMT_C, BPP_C) \
TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
  const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN); \
  const int kSizeUV = \
      SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y); \
  align_buffer_64(src_y, kWidth * kHeight + OFF); \
  align_buffer_64(src_u, kSizeUV + OFF); \
  align_buffer_64(src_v, kSizeUV + OFF); \
  align_buffer_64(dst_argb_c, kStrideB * kHeight); \
  align_buffer_64(dst_argb_opt, kStrideB * kHeight); \
  srandom(time(NULL)); \
  for (int i = 0; i < kWidth * kHeight; ++i) { \
    src_y[i + OFF] = (random() & 0xff); \
  } \
  for (int i = 0; i < kSizeUV; ++i) { \
    src_u[i + OFF] = (random() & 0xff); \
    src_v[i + OFF] = (random() & 0xff); \
  } \
  memset(dst_argb_c, 1, kStrideB * kHeight); \
  memset(dst_argb_opt, 101, kStrideB * kHeight); \
  MaskCpuFlags(0); \
  FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
                        src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                        src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                        dst_argb_c, kStrideB, \
                        kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
                          src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                          src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                          dst_argb_opt, kStrideB, \
                          kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  /* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
  align_buffer_64(dst_argb32_c, kWidth * BPP_C * kHeight); \
  align_buffer_64(dst_argb32_opt, kWidth * BPP_C * kHeight); \
  memset(dst_argb32_c, 2, kWidth * BPP_C * kHeight); \
  memset(dst_argb32_opt, 102, kWidth * BPP_C * kHeight); \
  FMT_B##To##FMT_C(dst_argb_c, kStrideB, \
                   dst_argb32_c, kWidth * BPP_C, \
                   kWidth, kHeight); \
  FMT_B##To##FMT_C(dst_argb_opt, kStrideB, \
                   dst_argb32_opt, kWidth * BPP_C, \
                   kWidth, kHeight); \
  for (int i = 0; i < kWidth * BPP_C * kHeight; ++i) { \
    int abs_diff = \
        abs(static_cast<int>(dst_argb32_c[i]) - \
            static_cast<int>(dst_argb32_opt[i])); \
    if (abs_diff > max_diff) { \
      max_diff = abs_diff; \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  free_aligned_buffer_64(src_y); \
  free_aligned_buffer_64(src_u); \
  free_aligned_buffer_64(src_v); \
  free_aligned_buffer_64(dst_argb_c); \
  free_aligned_buffer_64(dst_argb_opt); \
  free_aligned_buffer_64(dst_argb32_c); \
  free_aligned_buffer_64(dst_argb32_opt); \
}

#define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                      YALIGN, DIFF, FMT_C, BPP_C) \
    TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                   YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, FMT_C, BPP_C) \
    TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                   YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, FMT_C, BPP_C) \
    TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                   YALIGN, benchmark_width_, DIFF, _Invert, -, 0, FMT_C, BPP_C) \
    TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                   YALIGN, benchmark_width_, DIFF, _Opt, +, 0, FMT_C, BPP_C)

// TODO(fbarchard): Make vertical alignment unnecessary on bayer.
TESTPLANARTOB(I420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BGRA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGBA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RAW, 3, 3, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGB24, 3, 3, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGB565, 2, 2, 1, 9, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ARGB1555, 2, 2, 1, 9, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ARGB4444, 2, 2, 1, 17, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, BGRA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, RGBA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I411, 4, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I444, 1, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, YUY2, 2, 4, 1, 1, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, UYVY, 2, 4, 1, 1, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, YUY2, 2, 4, 1, 0, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, UYVY, 2, 4, 1, 0, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, I400, 1, 1, 1, 0, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BayerBGGR, 1, 2, 2, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BayerRGGB, 1, 2, 2, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BayerGBRG, 1, 2, 2, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BayerGRBG, 1, 2, 2, 2, ARGB, 4)

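// Biplanar-to-packed tests (NV12/NV21 to ARGB/RGB565): as above, both outputs
// are expanded to ARGB before the byte-wise comparison.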
#define TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
                         W1280, DIFF, N, NEG, OFF) \
TEST_F(libyuvTest, FMT_PLANAR##To##FMT_B##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  const int kStrideB = kWidth * BPP_B; \
  align_buffer_64(src_y, kWidth * kHeight + OFF); \
  align_buffer_64(src_uv, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y) * 2 + OFF); \
  align_buffer_64(dst_argb_c, kStrideB * kHeight); \
  align_buffer_64(dst_argb_opt, kStrideB * kHeight); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kWidth; ++j) \
      src_y[(i * kWidth) + j + OFF] = (random() & 0xff); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X) * 2; ++j) { \
      src_uv[(i * SUBSAMPLE(kWidth, SUBSAMP_X)) * 2 + j + OFF] = \
          (random() & 0xff); \
    } \
  } \
  memset(dst_argb_c, 1, kStrideB * kHeight); \
  memset(dst_argb_opt, 101, kStrideB * kHeight); \
  MaskCpuFlags(0); \
  FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
                        src_uv + OFF, SUBSAMPLE(kWidth, SUBSAMP_X) * 2, \
                        dst_argb_c, kWidth * BPP_B, \
                        kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
                          src_uv + OFF, SUBSAMPLE(kWidth, SUBSAMP_X) * 2, \
                          dst_argb_opt, kWidth * BPP_B, \
                          kWidth, NEG kHeight); \
  } \
  /* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
  align_buffer_64(dst_argb32_c, kWidth * 4 * kHeight); \
  align_buffer_64(dst_argb32_opt, kWidth * 4 * kHeight); \
  memset(dst_argb32_c, 2, kWidth * 4 * kHeight); \
  memset(dst_argb32_opt, 102, kWidth * 4 * kHeight); \
  FMT_B##ToARGB(dst_argb_c, kStrideB, \
                dst_argb32_c, kWidth * 4, \
                kWidth, kHeight); \
  FMT_B##ToARGB(dst_argb_opt, kStrideB, \
                dst_argb32_opt, kWidth * 4, \
                kWidth, kHeight); \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth * 4; ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_argb32_c[i * kWidth * 4 + j]) - \
              static_cast<int>(dst_argb32_opt[i * kWidth * 4 + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  free_aligned_buffer_64(src_y); \
  free_aligned_buffer_64(src_uv); \
  free_aligned_buffer_64(dst_argb_c); \
  free_aligned_buffer_64(dst_argb_opt); \
  free_aligned_buffer_64(dst_argb32_c); \
  free_aligned_buffer_64(dst_argb32_opt); \
}

#define TESTBIPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, DIFF) \
    TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
                     benchmark_width_ - 4, DIFF, _Any, +, 0) \
    TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
                     benchmark_width_, DIFF, _Unaligned, +, 1) \
    TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
                     benchmark_width_, DIFF, _Invert, -, 0) \
    TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
                     benchmark_width_, DIFF, _Opt, +, 0)

TESTBIPLANARTOB(NV12, 2, 2, ARGB, 4, 2)
TESTBIPLANARTOB(NV21, 2, 2, ARGB, 4, 2)
TESTBIPLANARTOB(NV12, 2, 2, RGB565, 2, 9)
TESTBIPLANARTOB(NV21, 2, 2, RGB565, 2, 9)

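// Packed-to-planar tests (e.g. ARGB, YUY2, Bayer to I420): the packed input
// is converted to planar YUV with the C path and with the optimized path, and
// each output plane must match to within DIFF.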
#define TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                       W1280, DIFF, N, NEG, OFF) \
TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
  const int kStride = \
      (SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMP_X * 8 * BPP_A + 7) / 8; \
  align_buffer_64(src_argb, kStride * kHeight + OFF); \
  align_buffer_64(dst_y_c, kWidth * kHeight); \
  align_buffer_64(dst_u_c, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_c, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_y_opt, kWidth * kHeight); \
  align_buffer_64(dst_u_opt, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_v_opt, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_c, 1, kWidth * kHeight); \
  memset(dst_u_c, 2, \
         SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_c, 3, \
         SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_opt, 101, kWidth * kHeight); \
  memset(dst_u_opt, 102, \
         SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_v_opt, 103, \
         SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kStride; ++j) \
      src_argb[(i * kStride) + j + OFF] = (random() & 0xff); \
  MaskCpuFlags(0); \
  FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
                        dst_y_c, kWidth, \
                        dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                        dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
                        kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
                          dst_y_opt, kWidth, \
                          dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                          dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
                          kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth; ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
              static_cast<int>(dst_y_opt[i * kWidth + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_u_c[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
              static_cast<int>(dst_u_opt[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_v_c[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
              static_cast<int>(dst_v_opt[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  free_aligned_buffer_64(dst_y_c); \
  free_aligned_buffer_64(dst_u_c); \
  free_aligned_buffer_64(dst_v_c); \
  free_aligned_buffer_64(dst_y_opt); \
  free_aligned_buffer_64(dst_u_opt); \
  free_aligned_buffer_64(dst_v_opt); \
  free_aligned_buffer_64(src_argb); \
}

#define TESTATOPLANAR(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                      DIFF) \
    TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_ - 4, DIFF, _Any, +, 0) \
    TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, DIFF, _Unaligned, +, 1) \
    TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, DIFF, _Invert, -, 0) \
    TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                   benchmark_width_, DIFF, _Opt, +, 0)

TESTATOPLANAR(ARGB, 4, 1, I420, 2, 2, 4)
#ifdef __arm__
TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 4)
#else
TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 0)
#endif
TESTATOPLANAR(BGRA, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(ABGR, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGBA, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(RAW, 3, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGB24, 3, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGB565, 2, 1, I420, 2, 2, 5)
// TODO(fbarchard): Make 1555 neon work same as C code, reduce to diff 9.
TESTATOPLANAR(ARGB1555, 2, 1, I420, 2, 2, 15)
TESTATOPLANAR(ARGB4444, 2, 1, I420, 2, 2, 17)
TESTATOPLANAR(ARGB, 4, 1, I411, 4, 1, 4)
TESTATOPLANAR(ARGB, 4, 1, I422, 2, 1, 2)
TESTATOPLANAR(ARGB, 4, 1, I444, 1, 1, 2)
TESTATOPLANAR(YUY2, 2, 1, I420, 2, 2, 2)
TESTATOPLANAR(UYVY, 2, 1, I420, 2, 2, 2)
TESTATOPLANAR(YUY2, 2, 1, I422, 2, 1, 2)
TESTATOPLANAR(UYVY, 2, 1, I422, 2, 1, 2)
TESTATOPLANAR(I400, 1, 1, I420, 2, 2, 2)
TESTATOPLANAR(BayerBGGR, 1, 2, I420, 2, 2, 4)
TESTATOPLANAR(BayerRGGB, 1, 2, I420, 2, 2, 4)
TESTATOPLANAR(BayerGBRG, 1, 2, I420, 2, 2, 4)
TESTATOPLANAR(BayerGRBG, 1, 2, I420, 2, 2, 4)

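// Packed-to-biplanar tests (ARGB to NV12/NV21): the Y plane and the
// interleaved UV plane are each checked against the C reference.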
#define TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                         W1280, N, NEG, OFF) \
TEST_F(libyuvTest, FMT_A##To##FMT_PLANAR##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  const int kStride = (kWidth * 8 * BPP_A + 7) / 8; \
  align_buffer_64(src_argb, kStride * kHeight + OFF); \
  align_buffer_64(dst_y_c, kWidth * kHeight); \
  align_buffer_64(dst_uv_c, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * 2 * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  align_buffer_64(dst_y_opt, kWidth * kHeight); \
  align_buffer_64(dst_uv_opt, \
                  SUBSAMPLE(kWidth, SUBSAMP_X) * 2 * \
                  SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  srandom(time(NULL)); \
  for (int i = 0; i < kHeight; ++i) \
    for (int j = 0; j < kStride; ++j) \
      src_argb[(i * kStride) + j + OFF] = (random() & 0xff); \
  memset(dst_y_c, 1, kWidth * kHeight); \
  memset(dst_uv_c, 2, SUBSAMPLE(kWidth, SUBSAMP_X) * 2 * \
                      SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  memset(dst_y_opt, 101, kWidth * kHeight); \
  memset(dst_uv_opt, 102, SUBSAMPLE(kWidth, SUBSAMP_X) * 2 * \
                          SUBSAMPLE(kHeight, SUBSAMP_Y)); \
  MaskCpuFlags(0); \
  FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
                        dst_y_c, kWidth, \
                        dst_uv_c, SUBSAMPLE(kWidth, SUBSAMP_X) * 2, \
                        kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
                          dst_y_opt, kWidth, \
                          dst_uv_opt, SUBSAMPLE(kWidth, SUBSAMP_X) * 2, \
                          kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kHeight; ++i) { \
    for (int j = 0; j < kWidth; ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
              static_cast<int>(dst_y_opt[i * kWidth + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 4); \
  for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
    for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X) * 2; ++j) { \
      int abs_diff = \
          abs(static_cast<int>(dst_uv_c[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) * 2 + j]) - \
              static_cast<int>(dst_uv_opt[i * \
                               SUBSAMPLE(kWidth, SUBSAMP_X) * 2 + j])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
  } \
  EXPECT_LE(max_diff, 4); \
  free_aligned_buffer_64(dst_y_c); \
  free_aligned_buffer_64(dst_uv_c); \
  free_aligned_buffer_64(dst_y_opt); \
  free_aligned_buffer_64(dst_uv_opt); \
  free_aligned_buffer_64(src_argb); \
}

#define TESTATOBIPLANAR(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
    TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_ - 4, _Any, +, 0) \
    TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Unaligned, +, 1) \
    TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Invert, -, 0) \
    TESTATOBIPLANARI(FMT_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
                     benchmark_width_, _Opt, +, 0)

TESTATOBIPLANAR(ARGB, 4, NV12, 2, 2)
TESTATOBIPLANAR(ARGB, 4, NV21, 2, 2)

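// Packed-to-packed tests (e.g. ARGB to RGB24, YUY2, Bayer and back): the C
// and optimized conversions of the same random input must match to within
// DIFF over the whole destination buffer.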
#define TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
                  FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
                  W1280, DIFF, N, NEG, OFF) \
TEST_F(libyuvTest, FMT_A##To##FMT_B##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
  const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
  const int kStrideA = (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
  const int kStrideB = (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
  align_buffer_64(src_argb, kStrideA * kHeightA + OFF); \
  align_buffer_64(dst_argb_c, kStrideB * kHeightB); \
  align_buffer_64(dst_argb_opt, kStrideB * kHeightB); \
  srandom(time(NULL)); \
  for (int i = 0; i < kStrideA * kHeightA; ++i) { \
    src_argb[i + OFF] = (random() & 0xff); \
  } \
  memset(dst_argb_c, 1, kStrideB * kHeightB); \
  memset(dst_argb_opt, 101, kStrideB * kHeightB); \
  MaskCpuFlags(0); \
  FMT_A##To##FMT_B(src_argb + OFF, kStrideA, \
                   dst_argb_c, kStrideB, \
                   kWidth, NEG kHeight); \
  MaskCpuFlags(-1); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_A##To##FMT_B(src_argb + OFF, kStrideA, \
                     dst_argb_opt, kStrideB, \
                     kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  for (int i = 0; i < kStrideB * kHeightB; ++i) { \
    int abs_diff = \
        abs(static_cast<int>(dst_argb_c[i]) - \
            static_cast<int>(dst_argb_opt[i])); \
    if (abs_diff > max_diff) { \
      max_diff = abs_diff; \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  free_aligned_buffer_64(src_argb); \
  free_aligned_buffer_64(dst_argb_c); \
  free_aligned_buffer_64(dst_argb_opt); \
}

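// Random-size variant: each iteration picks a width up to 64 and a height up
// to 32, and uses page-end aligned buffers (the image ends at the end of its
// allocation) to help catch reads or writes past the image.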
#define TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
                       FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF) \
TEST_F(libyuvTest, FMT_A##To##FMT_B##_Random) { \
  srandom(time(NULL)); \
  for (int times = 0; times < benchmark_iterations_; ++times) { \
    const int kWidth = (random() & 63) + 1; \
    const int kHeight = (random() & 31) + 1; \
    const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
    const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
    const int kStrideA = (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
    const int kStrideB = (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
    align_buffer_page_end(src_argb, kStrideA * kHeightA); \
    align_buffer_page_end(dst_argb_c, kStrideB * kHeightB); \
    align_buffer_page_end(dst_argb_opt, kStrideB * kHeightB); \
    for (int i = 0; i < kStrideA * kHeightA; ++i) { \
      src_argb[i] = (random() & 0xff); \
    } \
    memset(dst_argb_c, 123, kStrideB * kHeightB); \
    memset(dst_argb_opt, 123, kStrideB * kHeightB); \
    MaskCpuFlags(0); \
    FMT_A##To##FMT_B(src_argb, kStrideA, \
                     dst_argb_c, kStrideB, \
                     kWidth, kHeight); \
    MaskCpuFlags(-1); \
    FMT_A##To##FMT_B(src_argb, kStrideA, \
                     dst_argb_opt, kStrideB, \
                     kWidth, kHeight); \
    int max_diff = 0; \
    for (int i = 0; i < kStrideB * kHeightB; ++i) { \
      int abs_diff = \
          abs(static_cast<int>(dst_argb_c[i]) - \
              static_cast<int>(dst_argb_opt[i])); \
      if (abs_diff > max_diff) { \
        max_diff = abs_diff; \
      } \
    } \
    EXPECT_LE(max_diff, DIFF); \
    free_aligned_buffer_page_end(src_argb); \
    free_aligned_buffer_page_end(dst_argb_c); \
    free_aligned_buffer_page_end(dst_argb_opt); \
  } \
}

#define TESTATOB(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
                 FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF) \
    TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
              FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
              benchmark_width_ - 4, DIFF, _Any, +, 0) \
    TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
              FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
              benchmark_width_, DIFF, _Unaligned, +, 1) \
    TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
              FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
              benchmark_width_, DIFF, _Invert, -, 0) \
    TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
              FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
              benchmark_width_, DIFF, _Opt, +, 0) \
    TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
                   FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF)

TESTATOB(ARGB, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BGRA, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ABGR, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RGBA, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RAW, 3, 3, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RGB24, 3, 3, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RGB565, 2, 2, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ARGB1555, 2, 2, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ARGB4444, 2, 2, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BayerBGGR, 1, 1, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BayerRGGB, 1, 1, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BayerGBRG, 1, 1, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BayerGRBG, 1, 1, 1, 0)
TESTATOB(ARGB, 4, 4, 1, YUY2, 2, 4, 1, 4)
TESTATOB(ARGB, 4, 4, 1, UYVY, 2, 4, 1, 4)
TESTATOB(ARGB, 4, 4, 1, I400, 1, 1, 1, 2)
TESTATOB(ARGB, 4, 4, 1, J400, 1, 1, 1, 2)
TESTATOB(BGRA, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ABGR, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RGBA, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RAW, 3, 3, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RGB24, 3, 3, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RGB565, 2, 2, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB1555, 2, 2, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB4444, 2, 2, 1, ARGB, 4, 4, 1, 0)
TESTATOB(YUY2, 2, 4, 1, ARGB, 4, 4, 1, 4)
TESTATOB(UYVY, 2, 4, 1, ARGB, 4, 4, 1, 4)
TESTATOB(BayerBGGR, 1, 2, 2, ARGB, 4, 4, 1, 0)
TESTATOB(BayerRGGB, 1, 2, 2, ARGB, 4, 4, 1, 0)
TESTATOB(BayerGBRG, 1, 2, 2, ARGB, 4, 4, 1, 0)
TESTATOB(BayerGRBG, 1, 2, 2, ARGB, 4, 4, 1, 0)
TESTATOB(I400, 1, 1, 1, ARGB, 4, 4, 1, 0)
TESTATOB(I400, 1, 1, 1, I400, 1, 1, 1, 0)
TESTATOB(I400, 1, 1, 1, I400Mirror, 1, 1, 1, 0)
TESTATOB(Y, 1, 1, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ARGBMirror, 4, 4, 1, 0)

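// Spot check of ARGBToRGB565: convert a known gradient and compare a HashDjb2
// checksum of the packed output against a golden value.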
TEST_F(libyuvTest, Test565) {
  SIMD_ALIGNED(uint8 orig_pixels[256][4]);
  SIMD_ALIGNED(uint8 pixels565[256][2]);

  for (int i = 0; i < 256; ++i) {
    for (int j = 0; j < 4; ++j) {
      orig_pixels[i][j] = i;
    }
  }
  ARGBToRGB565(&orig_pixels[0][0], 0, &pixels565[0][0], 0, 256, 1);
  uint32 checksum = HashDjb2(&pixels565[0][0], sizeof(pixels565), 5381);
  EXPECT_EQ(610919429u, checksum);
}

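// ValidateJpeg looks for a JPEG SOI marker (0xff 0xd8) at the start of the
// buffer and an EOI marker (0xff 0xd9) near the end; the two tests below
// exercise the accepting and rejecting paths.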
#ifdef HAVE_JPEG
TEST_F(libyuvTest, ValidateJpeg) {
  const int kOff = 10;
  const int kMinJpeg = 64;
  const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg ?
      benchmark_width_ * benchmark_height_ : kMinJpeg;
  const int kSize = kImageSize + kOff;
  align_buffer_64(orig_pixels, kSize);

  // Clear the buffer, then add SOI and EOI markers. Expect pass.
  memset(orig_pixels, 0, kSize);
  orig_pixels[0] = 0xff;
  orig_pixels[1] = 0xd8;  // SOI.
  orig_pixels[kSize - kOff + 0] = 0xff;
  orig_pixels[kSize - kOff + 1] = 0xd9;  // EOI.
  for (int times = 0; times < benchmark_iterations_; ++times) {
    EXPECT_TRUE(ValidateJpeg(orig_pixels, kSize));
  }
  free_aligned_buffer_64(orig_pixels);
}

TEST_F(libyuvTest, InvalidateJpeg) {
  const int kOff = 10;
  const int kMinJpeg = 64;
  const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg ?
      benchmark_width_ * benchmark_height_ : kMinJpeg;
  const int kSize = kImageSize + kOff;
  align_buffer_64(orig_pixels, kSize);

  // No SOI or EOI. Expect fail.
  memset(orig_pixels, 0, kSize);
  EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));

  // SOI but no EOI. Expect fail.
  orig_pixels[0] = 0xff;
  orig_pixels[1] = 0xd8;  // SOI.
  for (int times = 0; times < benchmark_iterations_; ++times) {
    EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
  }

  // EOI but no SOI. Expect fail.
  orig_pixels[0] = 0;
  orig_pixels[1] = 0;
  orig_pixels[kSize - kOff + 0] = 0xff;
  orig_pixels[kSize - kOff + 1] = 0xd9;  // EOI.
  EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));

  free_aligned_buffer_64(orig_pixels);
}

#endif

}  // namespace libyuv
