// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This webpage shows the layout of YV12 and other YUV formats
// http://www.fourcc.org/yuv.php
// The actual conversion is best described here
// http://en.wikipedia.org/wiki/YUV
// An article on optimizing YUV conversion using tables instead of multiplies
// http://lestourtereaux.free.fr/papers/data/yuvrgb.pdf
//
// YV12 is a full plane of Y plus half-height, half-width chroma planes.
// YV16 is a full plane of Y plus full-height, half-width chroma planes.
// YV24 is a full plane of Y plus full-height, full-width chroma planes.
//
// The output is in ARGB pixel format, which on little endian is stored as BGRA.
// The alpha is set to 255, allowing the application to use RGBA or RGB32.
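//
// For example, for a 64x48 frame:
//   YV12: Y is 64x48, Cb and Cr are each 32x24 (a quarter of the Y samples).
//   YV16: Y is 64x48, Cb and Cr are each 32x48.
//   YV24: Y is 64x48, Cb and Cr are each 64x48.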

#include "yuv_convert.h"

// Header for low level row functions.
#include "yuv_row.h"
#include "mozilla/SSE.h"

namespace mozilla {

namespace gfx {

// 16.16 fixed point arithmetic
const int kFractionBits = 16;
const int kFractionMax = 1 << kFractionBits;
const int kFractionMask = ((1 << kFractionBits) - 1);
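// In this representation kFractionMax (65536) stands for 1.0; for example, a
// step of 1.5 source pixels per destination pixel is stored as 98304, i.e.
// (1 << 16) + (1 << 15).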

NS_GFX_(YUVType) TypeFromSize(int ywidth,
                              int yheight,
                              int cbcrwidth,
                              int cbcrheight)
{
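  // For example, (640, 480, 320, 240) maps to YV12 and (640, 480, 320, 480)
  // maps to YV16; any other plane geometry falls through to YV12.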
  if (ywidth == cbcrwidth && yheight == cbcrheight) {
    return YV24;
  }
  else if (ywidth / 2 == cbcrwidth && yheight == cbcrheight) {
    return YV16;
  }
  else {
    return YV12;
  }
}

// Convert a frame of YUV to 32 bit ARGB.
NS_GFX_(void) ConvertYCbCrToRGB32(const uint8* y_buf,
                                  const uint8* u_buf,
                                  const uint8* v_buf,
                                  uint8* rgb_buf,
                                  int pic_x,
                                  int pic_y,
                                  int pic_width,
                                  int pic_height,
                                  int y_pitch,
                                  int uv_pitch,
                                  int rgb_pitch,
                                  YUVType yuv_type) {
  unsigned int y_shift = yuv_type == YV12 ? 1 : 0;
  unsigned int x_shift = yuv_type == YV24 ? 0 : 1;
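  // The shifts halve the chroma coordinates where the format subsamples:
  // YV12 halves both x and y, YV16 halves only x, and YV24 halves neither.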
  // Test for SSE because the optimized code uses movntq, which is not part of MMX.
  bool has_sse = supports_mmx() && supports_sse();
  // There is no optimized YV24 SSE routine so we check for this and
  // fall back to the C code.
  has_sse &= yuv_type != YV24;
  bool odd_pic_x = yuv_type != YV24 && pic_x % 2 != 0;
  int x_width = odd_pic_x ? pic_width - 1 : pic_width;

  for (int y = pic_y; y < pic_height + pic_y; ++y) {
    uint8* rgb_row = rgb_buf + (y - pic_y) * rgb_pitch;
    const uint8* y_ptr = y_buf + y * y_pitch + pic_x;
    const uint8* u_ptr = u_buf + (y >> y_shift) * uv_pitch + (pic_x >> x_shift);
    const uint8* v_ptr = v_buf + (y >> y_shift) * uv_pitch + (pic_x >> x_shift);

    if (odd_pic_x) {
      // Handle the single odd pixel manually and use the
      // fast routines for the remaining pixels.
      FastConvertYUVToRGB32Row_C(y_ptr++,
                                 u_ptr++,
                                 v_ptr++,
                                 rgb_row,
                                 1,
                                 x_shift);
      rgb_row += 4;
    }

    if (has_sse) {
      FastConvertYUVToRGB32Row(y_ptr,
                               u_ptr,
                               v_ptr,
                               rgb_row,
                               x_width);
    }
    else {
      FastConvertYUVToRGB32Row_C(y_ptr,
                                 u_ptr,
                                 v_ptr,
                                 rgb_row,
                                 x_width,
                                 x_shift);
    }
  }

  // MMX used for FastConvertYUVToRGB32Row requires emms instruction.
  if (has_sse)
    EMMS();
}
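
// A minimal usage sketch (illustrative only; the buffer names and the 640x480
// frame size are hypothetical):
//
//   // Convert a whole 640x480 YV12 frame into a tightly packed ARGB buffer.
//   ConvertYCbCrToRGB32(y_plane, u_plane, v_plane, argb_out,
//                       0, 0, 640, 480,     // pic_x, pic_y, pic_width, pic_height
//                       640, 320, 640 * 4,  // y_pitch, uv_pitch, rgb_pitch
//                       YV12);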

// C version does 8 at a time to mimic MMX code
static void FilterRows_C(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
                         int source_width, int source_y_fraction) {
  int y1_fraction = source_y_fraction;
  int y0_fraction = 256 - y1_fraction;
  uint8* end = ybuf + source_width;
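  // Each output byte is the blend (y0 * (256 - f) + y1 * f) >> 8; a fraction
  // of 128, for example, yields an even average of the two source rows.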
  do {
    ybuf[0] = (y0_ptr[0] * y0_fraction + y1_ptr[0] * y1_fraction) >> 8;
    ybuf[1] = (y0_ptr[1] * y0_fraction + y1_ptr[1] * y1_fraction) >> 8;
    ybuf[2] = (y0_ptr[2] * y0_fraction + y1_ptr[2] * y1_fraction) >> 8;
    ybuf[3] = (y0_ptr[3] * y0_fraction + y1_ptr[3] * y1_fraction) >> 8;
    ybuf[4] = (y0_ptr[4] * y0_fraction + y1_ptr[4] * y1_fraction) >> 8;
    ybuf[5] = (y0_ptr[5] * y0_fraction + y1_ptr[5] * y1_fraction) >> 8;
    ybuf[6] = (y0_ptr[6] * y0_fraction + y1_ptr[6] * y1_fraction) >> 8;
    ybuf[7] = (y0_ptr[7] * y0_fraction + y1_ptr[7] * y1_fraction) >> 8;
    y0_ptr += 8;
    y1_ptr += 8;
    ybuf += 8;
  } while (ybuf < end);
}

#ifdef MOZILLA_MAY_SUPPORT_MMX
void FilterRows_MMX(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
                    int source_width, int source_y_fraction);
#endif

#ifdef MOZILLA_MAY_SUPPORT_SSE2
void FilterRows_SSE2(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
                     int source_width, int source_y_fraction);
#endif

static inline void FilterRows(uint8* ybuf, const uint8* y0_ptr,
                              const uint8* y1_ptr, int source_width,
                              int source_y_fraction) {
#ifdef MOZILLA_MAY_SUPPORT_SSE2
  if (mozilla::supports_sse2()) {
    FilterRows_SSE2(ybuf, y0_ptr, y1_ptr, source_width, source_y_fraction);
    return;
  }
#endif

#ifdef MOZILLA_MAY_SUPPORT_MMX
  if (mozilla::supports_mmx()) {
    FilterRows_MMX(ybuf, y0_ptr, y1_ptr, source_width, source_y_fraction);
    return;
  }
#endif

  FilterRows_C(ybuf, y0_ptr, y1_ptr, source_width, source_y_fraction);
}


// Scale a frame of YUV to 32 bit ARGB.
NS_GFX_(void) ScaleYCbCrToRGB32(const uint8* y_buf,
                                const uint8* u_buf,
                                const uint8* v_buf,
                                uint8* rgb_buf,
                                int source_width,
                                int source_height,
                                int width,
                                int height,
                                int y_pitch,
                                int uv_pitch,
                                int rgb_pitch,
                                YUVType yuv_type,
                                Rotate view_rotate,
                                ScaleFilter filter) {
  bool has_mmx = supports_mmx();

  // 4096 allows 3 buffers to fit in 12k.
  // Helps performance on CPU with 16K L1 cache.
  // Large enough for 3840x2160 and 30" displays which are 2560x1600.
  const int kFilterBufferSize = 4096;
  // Disable filtering if the screen is too big (to avoid buffer overflows).
  // This should never happen to regular users: they don't have monitors
  // wider than 4096 pixels.
  // TODO(fbarchard): Allow rotated videos to filter.
  if (source_width > kFilterBufferSize || view_rotate)
    filter = FILTER_NONE;

  unsigned int y_shift = yuv_type == YV12 ? 1 : 0;
  // Diagram showing origin and direction of source sampling.
  // ->0    4<-
  // 7        3
  //
  // 6        5
  // ->1    2<-
  // Rotations that start at right side of image.
  if ((view_rotate == ROTATE_180) ||
      (view_rotate == ROTATE_270) ||
      (view_rotate == MIRROR_ROTATE_0) ||
      (view_rotate == MIRROR_ROTATE_90)) {
    y_buf += source_width - 1;
    u_buf += source_width / 2 - 1;
    v_buf += source_width / 2 - 1;
    source_width = -source_width;
  }
  // Rotations that start at bottom of image.
  if ((view_rotate == ROTATE_90) ||
      (view_rotate == ROTATE_180) ||
      (view_rotate == MIRROR_ROTATE_90) ||
      (view_rotate == MIRROR_ROTATE_180)) {
    y_buf += (source_height - 1) * y_pitch;
    u_buf += ((source_height >> y_shift) - 1) * uv_pitch;
    v_buf += ((source_height >> y_shift) - 1) * uv_pitch;
    source_height = -source_height;
  }
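  // For example, ROTATE_180 matches both groups above: the source pointers end
  // up at the bottom-right corner and both source_width and source_height are
  // negated, so sampling proceeds right-to-left and bottom-to-top.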

  // Handle zero sized destination.
  if (width == 0 || height == 0)
    return;
  int source_dx = source_width * kFractionMax / width;
  int source_dy = source_height * kFractionMax / height;
  int source_dx_uv = source_dx;
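  // For example, downscaling a 640-pixel-wide source to a 320-pixel-wide
  // destination gives source_dx = 2 * kFractionMax, i.e. two source pixels
  // are stepped per destination pixel.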

  if ((view_rotate == ROTATE_90) ||
      (view_rotate == ROTATE_270)) {
    int tmp = height;
    height = width;
    width = tmp;
    tmp = source_height;
    source_height = source_width;
    source_width = tmp;
    int original_dx = source_dx;
    int original_dy = source_dy;
    source_dx = ((original_dy >> kFractionBits) * y_pitch) << kFractionBits;
    source_dx_uv = ((original_dy >> kFractionBits) * uv_pitch) << kFractionBits;
    source_dy = original_dx;
    if (view_rotate == ROTATE_90) {
      y_pitch = -1;
      uv_pitch = -1;
      source_height = -source_height;
    } else {
      y_pitch = 1;
      uv_pitch = 1;
    }
  }

  // Need padding because FilterRows() will write 1 to 16 extra pixels
  // after the end for SSE2 version.
  uint8 yuvbuf[16 + kFilterBufferSize * 3 + 16];
  uint8* ybuf =
      reinterpret_cast<uint8*>(reinterpret_cast<uintptr_t>(yuvbuf + 15) & ~15);
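  // (yuvbuf + 15) & ~15 rounds the address up to the next 16-byte boundary, so
  // the row buffers start aligned, which the SSE2 row filter can rely on.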
  uint8* ubuf = ybuf + kFilterBufferSize;
  uint8* vbuf = ubuf + kFilterBufferSize;
  // TODO(fbarchard): Fixed point math is off by 1 on negatives.
  int yscale_fixed = (source_height << kFractionBits) / height;

  // TODO(fbarchard): Split this into separate function for better efficiency.
  for (int y = 0; y < height; ++y) {
    uint8* dest_pixel = rgb_buf + y * rgb_pitch;
    int source_y_subpixel = (y * yscale_fixed);
    if (yscale_fixed >= (kFractionMax * 2)) {
      source_y_subpixel += kFractionMax / 2;  // For 1/2 or less, center filter.
    }
    int source_y = source_y_subpixel >> kFractionBits;

    const uint8* y0_ptr = y_buf + source_y * y_pitch;
    const uint8* y1_ptr = y0_ptr + y_pitch;

    const uint8* u0_ptr = u_buf + (source_y >> y_shift) * uv_pitch;
    const uint8* u1_ptr = u0_ptr + uv_pitch;
    const uint8* v0_ptr = v_buf + (source_y >> y_shift) * uv_pitch;
    const uint8* v1_ptr = v0_ptr + uv_pitch;

    // vertical scaler uses 16.8 fixed point
    int source_y_fraction = (source_y_subpixel & kFractionMask) >> 8;
    int source_uv_fraction =
        ((source_y_subpixel >> y_shift) & kFractionMask) >> 8;
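    // e.g. a subpixel offset of exactly half a row (kFractionMax / 2) becomes
    // a blend weight of 128 out of 256, i.e. an even average of the two rows.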

    const uint8* y_ptr = y0_ptr;
    const uint8* u_ptr = u0_ptr;
    const uint8* v_ptr = v0_ptr;
    // Apply vertical filtering if necessary.
    // TODO(fbarchard): Remove memcpy when not necessary.
    if (filter & mozilla::gfx::FILTER_BILINEAR_V) {
      if (yscale_fixed != kFractionMax &&
          source_y_fraction && ((source_y + 1) < source_height)) {
        FilterRows(ybuf, y0_ptr, y1_ptr, source_width, source_y_fraction);
      } else {
        memcpy(ybuf, y0_ptr, source_width);
      }
      y_ptr = ybuf;
      ybuf[source_width] = ybuf[source_width - 1];
      int uv_source_width = (source_width + 1) / 2;
      if (yscale_fixed != kFractionMax &&
          source_uv_fraction &&
          (((source_y >> y_shift) + 1) < (source_height >> y_shift))) {
        FilterRows(ubuf, u0_ptr, u1_ptr, uv_source_width, source_uv_fraction);
        FilterRows(vbuf, v0_ptr, v1_ptr, uv_source_width, source_uv_fraction);
      } else {
        memcpy(ubuf, u0_ptr, uv_source_width);
        memcpy(vbuf, v0_ptr, uv_source_width);
      }
      u_ptr = ubuf;
      v_ptr = vbuf;
      ubuf[uv_source_width] = ubuf[uv_source_width - 1];
      vbuf[uv_source_width] = vbuf[uv_source_width - 1];
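      // Each filtered row is extended by one duplicated trailing pixel so the
      // horizontal scalers below can safely read one sample past the last
      // column.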
    }
    if (source_dx == kFractionMax) {  // Not scaled
      FastConvertYUVToRGB32Row(y_ptr, u_ptr, v_ptr,
                               dest_pixel, width);
    } else if (filter & FILTER_BILINEAR_H) {
      LinearScaleYUVToRGB32Row(y_ptr, u_ptr, v_ptr,
                               dest_pixel, width, source_dx);
    } else {
      // Specialized scalers and rotation.
#if defined(MOZILLA_MAY_SUPPORT_SSE) && defined(_MSC_VER) && defined(_M_IX86)
      if (mozilla::supports_sse()) {
        if (width == (source_width * 2)) {
          DoubleYUVToRGB32Row_SSE(y_ptr, u_ptr, v_ptr,
                                  dest_pixel, width);
        } else if ((source_dx & kFractionMask) == 0) {
          // Scaling by an integer scale factor, e.g. half size.
          ConvertYUVToRGB32Row_SSE(y_ptr, u_ptr, v_ptr,
                                   dest_pixel, width,
                                   source_dx >> kFractionBits);
        } else if (source_dx_uv == source_dx) {  // Not rotated.
          ScaleYUVToRGB32Row(y_ptr, u_ptr, v_ptr,
                             dest_pixel, width, source_dx);
        } else {
          RotateConvertYUVToRGB32Row_SSE(y_ptr, u_ptr, v_ptr,
                                         dest_pixel, width,
                                         source_dx >> kFractionBits,
                                         source_dx_uv >> kFractionBits);
        }
      }
      else {
        ScaleYUVToRGB32Row_C(y_ptr, u_ptr, v_ptr,
                             dest_pixel, width, source_dx);
      }
#else
      (void)source_dx_uv;
      ScaleYUVToRGB32Row(y_ptr, u_ptr, v_ptr,
                         dest_pixel, width, source_dx);
#endif
    }
  }
  // MMX used for FastConvertYUVToRGB32Row and FilterRows requires emms.
  if (has_mmx)
    EMMS();
}

}  // namespace gfx
}  // namespace mozilla