// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This webpage shows the layout of YV12 and other YUV formats:
// http://www.fourcc.org/yuv.php
// The actual conversion is best described here:
// http://en.wikipedia.org/wiki/YUV
// An article on optimizing YUV conversion using tables instead of multiplies:
// http://lestourtereaux.free.fr/papers/data/yuvrgb.pdf
//
// YV12 is a full plane of Y with half-height, half-width chroma planes.
// YV16 is a full plane of Y with full-height, half-width chroma planes.
// YV24 is a full plane of Y with full-height, full-width chroma planes.
//
// The ARGB pixel format is output, which on little endian is stored as BGRA.
// The alpha is set to 255, allowing the application to use RGBA or RGB32.

#include "yuv_convert.h"

// Header for low level row functions.
#include "yuv_row.h"
#include "mozilla/SSE.h"

namespace mozilla {

namespace gfx {

// 16.16 fixed point arithmetic
const int kFractionBits = 16;
const int kFractionMax = 1 << kFractionBits;
const int kFractionMask = ((1 << kFractionBits) - 1);

NS_GFX_(YUVType) TypeFromSize(int ywidth,
                              int yheight,
                              int cbcrwidth,
                              int cbcrheight)
{
  if (ywidth == cbcrwidth && yheight == cbcrheight) {
    return YV24;
  }
  else if (ywidth / 2 == cbcrwidth && yheight == cbcrheight) {
    return YV16;
  }
  else {
    return YV12;
  }
}

// Convert a frame of YUV to 32 bit ARGB.
NS_GFX_(void) ConvertYCbCrToRGB32(const uint8* y_buf,
                                  const uint8* u_buf,
                                  const uint8* v_buf,
                                  uint8* rgb_buf,
                                  int pic_x,
                                  int pic_y,
                                  int pic_width,
                                  int pic_height,
                                  int y_pitch,
                                  int uv_pitch,
                                  int rgb_pitch,
                                  YUVType yuv_type) {
  unsigned int y_shift = yuv_type == YV12 ? 1 : 0;
  unsigned int x_shift = yuv_type == YV24 ? 0 : 1;
  // Test for SSE because the optimized code uses movntq, which is not
  // part of MMX.
  bool has_sse = supports_mmx() && supports_sse();
  // There is no optimized YV24 SSE routine so we check for this and
  // fall back to the C code.
  has_sse &= yuv_type != YV24;
  bool odd_pic_x = yuv_type != YV24 && pic_x % 2 != 0;
  int x_width = odd_pic_x ? pic_width - 1 : pic_width;

  for (int y = pic_y; y < pic_height + pic_y; ++y) {
    uint8* rgb_row = rgb_buf + (y - pic_y) * rgb_pitch;
    const uint8* y_ptr = y_buf + y * y_pitch + pic_x;
    const uint8* u_ptr = u_buf + (y >> y_shift) * uv_pitch + (pic_x >> x_shift);
    const uint8* v_ptr = v_buf + (y >> y_shift) * uv_pitch + (pic_x >> x_shift);

    if (odd_pic_x) {
      // Handle the single odd pixel manually and use the
      // fast routines for the remaining pixels.
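      // Editorial note (inferred from the odd_pic_x handling above): with
      // horizontally subsampled chroma (x_shift == 1 for YV12/YV16) the fast
      // row routines pair two luma pixels with one chroma sample, so the row
      // handed to them should start on an even source column. Converting the
      // single leading odd pixel with the C routine realigns the remaining
      // x_width = pic_width - 1 pixels.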
      FastConvertYUVToRGB32Row_C(y_ptr++,
                                 u_ptr++,
                                 v_ptr++,
                                 rgb_row,
                                 1,
                                 x_shift);
      rgb_row += 4;
    }

    if (has_sse) {
      FastConvertYUVToRGB32Row(y_ptr,
                               u_ptr,
                               v_ptr,
                               rgb_row,
                               x_width);
    }
    else {
      FastConvertYUVToRGB32Row_C(y_ptr,
                                 u_ptr,
                                 v_ptr,
                                 rgb_row,
                                 x_width,
                                 x_shift);
    }
  }

  // MMX used for FastConvertYUVToRGB32Row requires emms instruction.
  if (has_sse)
    EMMS();
}

// C version does 8 at a time to mimic MMX code
static void FilterRows_C(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
                         int source_width, int source_y_fraction) {
  int y1_fraction = source_y_fraction;
  int y0_fraction = 256 - y1_fraction;
  uint8* end = ybuf + source_width;
  do {
    ybuf[0] = (y0_ptr[0] * y0_fraction + y1_ptr[0] * y1_fraction) >> 8;
    ybuf[1] = (y0_ptr[1] * y0_fraction + y1_ptr[1] * y1_fraction) >> 8;
    ybuf[2] = (y0_ptr[2] * y0_fraction + y1_ptr[2] * y1_fraction) >> 8;
    ybuf[3] = (y0_ptr[3] * y0_fraction + y1_ptr[3] * y1_fraction) >> 8;
    ybuf[4] = (y0_ptr[4] * y0_fraction + y1_ptr[4] * y1_fraction) >> 8;
    ybuf[5] = (y0_ptr[5] * y0_fraction + y1_ptr[5] * y1_fraction) >> 8;
    ybuf[6] = (y0_ptr[6] * y0_fraction + y1_ptr[6] * y1_fraction) >> 8;
    ybuf[7] = (y0_ptr[7] * y0_fraction + y1_ptr[7] * y1_fraction) >> 8;
    y0_ptr += 8;
    y1_ptr += 8;
    ybuf += 8;
  } while (ybuf < end);
}

#ifdef MOZILLA_MAY_SUPPORT_MMX
void FilterRows_MMX(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
                    int source_width, int source_y_fraction);
#endif

#ifdef MOZILLA_MAY_SUPPORT_SSE2
void FilterRows_SSE2(uint8* ybuf, const uint8* y0_ptr, const uint8* y1_ptr,
                     int source_width, int source_y_fraction);
#endif

static inline void FilterRows(uint8* ybuf, const uint8* y0_ptr,
                              const uint8* y1_ptr, int source_width,
                              int source_y_fraction) {
#ifdef MOZILLA_MAY_SUPPORT_SSE2
  if (mozilla::supports_sse2()) {
    FilterRows_SSE2(ybuf, y0_ptr, y1_ptr, source_width, source_y_fraction);
    return;
  }
#endif

#ifdef MOZILLA_MAY_SUPPORT_MMX
  if (mozilla::supports_mmx()) {
    FilterRows_MMX(ybuf, y0_ptr, y1_ptr, source_width, source_y_fraction);
    return;
  }
#endif

  FilterRows_C(ybuf, y0_ptr, y1_ptr, source_width, source_y_fraction);
}


// Scale a frame of YUV to 32 bit ARGB.
NS_GFX_(void) ScaleYCbCrToRGB32(const uint8* y_buf,
                                const uint8* u_buf,
                                const uint8* v_buf,
                                uint8* rgb_buf,
                                int source_width,
                                int source_height,
                                int width,
                                int height,
                                int y_pitch,
                                int uv_pitch,
                                int rgb_pitch,
                                YUVType yuv_type,
                                Rotate view_rotate,
                                ScaleFilter filter) {
  bool has_mmx = supports_mmx();

  // 4096 allows 3 buffers to fit in 12k.
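  // (Editorial note: the three buffers are the ybuf/ubuf/vbuf row scratch
  // areas carved out of yuvbuf below; 3 * 4096 bytes = 12 KiB.)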
  // Helps performance on CPU with 16K L1 cache.
  // Large enough for 3840x2160 and 30" displays which are 2560x1600.
  const int kFilterBufferSize = 4096;
  // Disable filtering if the screen is too big (to avoid buffer overflows).
  // This should never happen to regular users: they don't have monitors
  // wider than 4096 pixels.
  // TODO(fbarchard): Allow rotated videos to filter.
  if (source_width > kFilterBufferSize || view_rotate)
    filter = FILTER_NONE;

  unsigned int y_shift = yuv_type == YV12 ? 1 : 0;
  // Diagram showing origin and direction of source sampling.
  // ->0   4<-
  // 7       3
  //
  // 6       5
  // ->1   2<-
  // Rotations that start at right side of image.
  if ((view_rotate == ROTATE_180) ||
      (view_rotate == ROTATE_270) ||
      (view_rotate == MIRROR_ROTATE_0) ||
      (view_rotate == MIRROR_ROTATE_90)) {
    y_buf += source_width - 1;
    u_buf += source_width / 2 - 1;
    v_buf += source_width / 2 - 1;
    source_width = -source_width;
  }
  // Rotations that start at bottom of image.
  if ((view_rotate == ROTATE_90) ||
      (view_rotate == ROTATE_180) ||
      (view_rotate == MIRROR_ROTATE_90) ||
      (view_rotate == MIRROR_ROTATE_180)) {
    y_buf += (source_height - 1) * y_pitch;
    u_buf += ((source_height >> y_shift) - 1) * uv_pitch;
    v_buf += ((source_height >> y_shift) - 1) * uv_pitch;
    source_height = -source_height;
  }

  // Handle zero sized destination.
  if (width == 0 || height == 0)
    return;
  int source_dx = source_width * kFractionMax / width;
  int source_dy = source_height * kFractionMax / height;
  int source_dx_uv = source_dx;

  if ((view_rotate == ROTATE_90) ||
      (view_rotate == ROTATE_270)) {
    int tmp = height;
    height = width;
    width = tmp;
    tmp = source_height;
    source_height = source_width;
    source_width = tmp;
    int original_dx = source_dx;
    int original_dy = source_dy;
    source_dx = ((original_dy >> kFractionBits) * y_pitch) << kFractionBits;
    source_dx_uv = ((original_dy >> kFractionBits) * uv_pitch) << kFractionBits;
    source_dy = original_dx;
    if (view_rotate == ROTATE_90) {
      y_pitch = -1;
      uv_pitch = -1;
      source_height = -source_height;
    } else {
      y_pitch = 1;
      uv_pitch = 1;
    }
  }

  // Need padding because FilterRows() will write 1 to 16 extra pixels
  // after the end for the SSE2 version.
  uint8 yuvbuf[16 + kFilterBufferSize * 3 + 16];
  uint8* ybuf =
      reinterpret_cast<uint8*>(reinterpret_cast<uintptr_t>(yuvbuf + 15) & ~15);
  uint8* ubuf = ybuf + kFilterBufferSize;
  uint8* vbuf = ubuf + kFilterBufferSize;
  // TODO(fbarchard): Fixed point math is off by 1 on negatives.
  int yscale_fixed = (source_height << kFractionBits) / height;

  // TODO(fbarchard): Split this into separate function for better efficiency.
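  // Editorial worked example of the 16.16 fixed-point stepping used below:
  // halving a 1280x720 source to 640x360 gives
  //   source_dx    = 1280 * 65536 / 640 = 131072  (2.0 in 16.16)
  //   yscale_fixed = (720 << 16) / 360  = 131072  (2.0 in 16.16)
  // so each destination pixel/row advances two source pixels/rows, and the
  // low 16 bits of source_y_subpixel (reduced to 8 bits for FilterRows)
  // give the blend weight between adjacent source rows.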
  for (int y = 0; y < height; ++y) {
    uint8* dest_pixel = rgb_buf + y * rgb_pitch;
    int source_y_subpixel = (y * yscale_fixed);
    if (yscale_fixed >= (kFractionMax * 2)) {
      source_y_subpixel += kFractionMax / 2;  // For 1/2 or less, center filter.
    }
    int source_y = source_y_subpixel >> kFractionBits;

    const uint8* y0_ptr = y_buf + source_y * y_pitch;
    const uint8* y1_ptr = y0_ptr + y_pitch;

    const uint8* u0_ptr = u_buf + (source_y >> y_shift) * uv_pitch;
    const uint8* u1_ptr = u0_ptr + uv_pitch;
    const uint8* v0_ptr = v_buf + (source_y >> y_shift) * uv_pitch;
    const uint8* v1_ptr = v0_ptr + uv_pitch;

    // vertical scaler uses 16.8 fixed point
    int source_y_fraction = (source_y_subpixel & kFractionMask) >> 8;
    int source_uv_fraction =
        ((source_y_subpixel >> y_shift) & kFractionMask) >> 8;

    const uint8* y_ptr = y0_ptr;
    const uint8* u_ptr = u0_ptr;
    const uint8* v_ptr = v0_ptr;
    // Apply vertical filtering if necessary.
    // TODO(fbarchard): Remove memcpy when not necessary.
    if (filter & mozilla::gfx::FILTER_BILINEAR_V) {
      if (yscale_fixed != kFractionMax &&
          source_y_fraction && ((source_y + 1) < source_height)) {
        FilterRows(ybuf, y0_ptr, y1_ptr, source_width, source_y_fraction);
      } else {
        memcpy(ybuf, y0_ptr, source_width);
      }
      y_ptr = ybuf;
      ybuf[source_width] = ybuf[source_width - 1];
      int uv_source_width = (source_width + 1) / 2;
      if (yscale_fixed != kFractionMax &&
          source_uv_fraction &&
          (((source_y >> y_shift) + 1) < (source_height >> y_shift))) {
        FilterRows(ubuf, u0_ptr, u1_ptr, uv_source_width, source_uv_fraction);
        FilterRows(vbuf, v0_ptr, v1_ptr, uv_source_width, source_uv_fraction);
      } else {
        memcpy(ubuf, u0_ptr, uv_source_width);
        memcpy(vbuf, v0_ptr, uv_source_width);
      }
      u_ptr = ubuf;
      v_ptr = vbuf;
      ubuf[uv_source_width] = ubuf[uv_source_width - 1];
      vbuf[uv_source_width] = vbuf[uv_source_width - 1];
    }
    if (source_dx == kFractionMax) {  // Not scaled
      FastConvertYUVToRGB32Row(y_ptr, u_ptr, v_ptr,
                               dest_pixel, width);
    } else if (filter & FILTER_BILINEAR_H) {
      LinearScaleYUVToRGB32Row(y_ptr, u_ptr, v_ptr,
                               dest_pixel, width, source_dx);
    } else {
      // Specialized scalers and rotation.
#if defined(MOZILLA_MAY_SUPPORT_SSE) && defined(_MSC_VER) && defined(_M_IX86)
      if (mozilla::supports_sse()) {
        if (width == (source_width * 2)) {
          DoubleYUVToRGB32Row_SSE(y_ptr, u_ptr, v_ptr,
                                  dest_pixel, width);
        } else if ((source_dx & kFractionMask) == 0) {
          // Scaling by integer scale factor, e.g. half.
          ConvertYUVToRGB32Row_SSE(y_ptr, u_ptr, v_ptr,
                                   dest_pixel, width,
                                   source_dx >> kFractionBits);
        } else if (source_dx_uv == source_dx) {  // Not rotated.
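          // Editorial note: source_dx_uv only diverges from source_dx in the
          // ROTATE_90/ROTATE_270 setup above, so equal values mean the frame
          // is not rotated and a plain fixed-point horizontal scale suffices.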
          ScaleYUVToRGB32Row(y_ptr, u_ptr, v_ptr,
                             dest_pixel, width, source_dx);
        } else {
          RotateConvertYUVToRGB32Row_SSE(y_ptr, u_ptr, v_ptr,
                                         dest_pixel, width,
                                         source_dx >> kFractionBits,
                                         source_dx_uv >> kFractionBits);
        }
      }
      else {
        ScaleYUVToRGB32Row_C(y_ptr, u_ptr, v_ptr,
                             dest_pixel, width, source_dx);
      }
#else
      (void)source_dx_uv;
      ScaleYUVToRGB32Row(y_ptr, u_ptr, v_ptr,
                         dest_pixel, width, source_dx);
#endif
    }
  }
  // MMX used for FastConvertYUVToRGB32Row and FilterRows requires emms.
  if (has_mmx)
    EMMS();
}

}  // namespace gfx
}  // namespace mozilla
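
// ---------------------------------------------------------------------------
// Editorial usage sketch (not part of the original file): converting one
// YV12 frame. The buffer and stride names are hypothetical and assumed to be
// supplied by the caller; rgb_out must hold width * height * 4 bytes, and the
// Cb/Cr planes are (width / 2) x (height / 2).
//
//   mozilla::gfx::ConvertYCbCrToRGB32(y_plane, u_plane, v_plane,
//                                     rgb_out,
//                                     0, 0,             // pic_x, pic_y
//                                     width, height,    // picture size
//                                     y_stride,         // y_pitch
//                                     uv_stride,        // uv_pitch
//                                     width * 4,        // rgb_pitch
//                                     mozilla::gfx::YV12);
// ---------------------------------------------------------------------------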