--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/media/libyuv/source/rotate_argb.cc	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/rotate.h"
+
+#include "libyuv/cpu_id.h"
+#include "libyuv/convert.h"
+#include "libyuv/planar_functions.h"
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// ARGBScale has a function to copy pixels to a row, striding each source
+// pixel by a constant.
+#if !defined(LIBYUV_DISABLE_X86) && \
+    (defined(_M_IX86) || \
+    (defined(__x86_64__) && !defined(__native_client__)) || defined(__i386__))
+#define HAS_SCALEARGBROWDOWNEVEN_SSE2
+void ScaleARGBRowDownEven_SSE2(const uint8* src_ptr, int src_stride,
+                               int src_stepx,
+                               uint8* dst_ptr, int dst_width);
+#endif
+#if !defined(LIBYUV_DISABLE_NEON) && !defined(__native_client__) && \
+    (defined(__ARM_NEON__) || defined(LIBYUV_NEON))
+#define HAS_SCALEARGBROWDOWNEVEN_NEON
+void ScaleARGBRowDownEven_NEON(const uint8* src_ptr, int src_stride,
+                               int src_stepx,
+                               uint8* dst_ptr, int dst_width);
+#endif
+
+void ScaleARGBRowDownEven_C(const uint8* src_ptr, int,
+                            int src_stepx,
+                            uint8* dst_ptr, int dst_width);
+
+static void ARGBTranspose(const uint8* src, int src_stride,
+                          uint8* dst, int dst_stride,
+                          int width, int height) {
+  int i;
+  int src_pixel_step = src_stride >> 2;
+  void (*ScaleARGBRowDownEven)(const uint8* src_ptr, int src_stride,
+      int src_step, uint8* dst_ptr, int dst_width) = ScaleARGBRowDownEven_C;
+#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
+  if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(height, 4) &&  // Width of dest.
+      IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride, 16)) {
+    ScaleARGBRowDownEven = ScaleARGBRowDownEven_SSE2;
+  }
+#elif defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
+  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(height, 4) &&  // Width of dest.
+      IS_ALIGNED(src, 4)) {
+    ScaleARGBRowDownEven = ScaleARGBRowDownEven_NEON;
+  }
+#endif
+
+  for (i = 0; i < width; ++i) {  // column of source to row of dest.
+    ScaleARGBRowDownEven(src, 0, src_pixel_step, dst, height);
+    dst += dst_stride;
+    src += 4;
+  }
+}
+
+void ARGBRotate90(const uint8* src, int src_stride,
+                  uint8* dst, int dst_stride,
+                  int width, int height) {
+  // Rotate by 90 is an ARGBTranspose with the source read
+  // from bottom to top. So set the source pointer to the end
+  // of the buffer and flip the sign of the source stride.
+  src += src_stride * (height - 1);
+  src_stride = -src_stride;
+  ARGBTranspose(src, src_stride, dst, dst_stride, width, height);
+}
+
+void ARGBRotate270(const uint8* src, int src_stride,
+                   uint8* dst, int dst_stride,
+                   int width, int height) {
+  // Rotate by 270 is an ARGBTranspose with the destination written
+  // from bottom to top. So set the destination pointer to the end
+  // of the buffer and flip the sign of the destination stride.
+  dst += dst_stride * (width - 1);
+  dst_stride = -dst_stride;
+  ARGBTranspose(src, src_stride, dst, dst_stride, width, height);
+}
+
+void ARGBRotate180(const uint8* src, int src_stride,
+                   uint8* dst, int dst_stride,
+                   int width, int height) {
+  // Swap first and last row and mirror the content. Uses a temporary row.
+  align_buffer_64(row, width * 4);
+  const uint8* src_bot = src + src_stride * (height - 1);
+  uint8* dst_bot = dst + dst_stride * (height - 1);
+  int half_height = (height + 1) >> 1;
+  int y;
+  void (*ARGBMirrorRow)(const uint8* src, uint8* dst, int width) =
+      ARGBMirrorRow_C;
+  void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C;
+#if defined(HAS_ARGBMIRRORROW_SSSE3)
+  if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 4) &&
+      IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16) &&
+      IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride, 16)) {
+    ARGBMirrorRow = ARGBMirrorRow_SSSE3;
+  }
+#endif
+#if defined(HAS_ARGBMIRRORROW_AVX2)
+  if (TestCpuFlag(kCpuHasAVX2) && IS_ALIGNED(width, 8)) {
+    ARGBMirrorRow = ARGBMirrorRow_AVX2;
+  }
+#endif
+#if defined(HAS_ARGBMIRRORROW_NEON)
+  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 4)) {
+    ARGBMirrorRow = ARGBMirrorRow_NEON;
+  }
+#endif
+#if defined(HAS_COPYROW_NEON)
+  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width * 4, 32)) {
+    CopyRow = CopyRow_NEON;
+  }
+#endif
+#if defined(HAS_COPYROW_X86)
+  if (TestCpuFlag(kCpuHasX86)) {
+    CopyRow = CopyRow_X86;
+  }
+#endif
+#if defined(HAS_COPYROW_SSE2)
+  if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width * 4, 32) &&
+      IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16) &&
+      IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride, 16)) {
+    CopyRow = CopyRow_SSE2;
+  }
+#endif
+#if defined(HAS_COPYROW_ERMS)
+  if (TestCpuFlag(kCpuHasERMS)) {
+    CopyRow = CopyRow_ERMS;
+  }
+#endif
+#if defined(HAS_COPYROW_MIPS)
+  if (TestCpuFlag(kCpuHasMIPS)) {
+    CopyRow = CopyRow_MIPS;
+  }
+#endif
+
+  // Odd height will harmlessly mirror the middle row twice.
+  for (y = 0; y < half_height; ++y) {
+    ARGBMirrorRow(src, row, width);  // Mirror first row into a buffer
+    ARGBMirrorRow(src_bot, dst, width);  // Mirror last row into first row
+    CopyRow(row, dst_bot, width * 4);  // Copy first mirrored row into last
+    src += src_stride;
+    dst += dst_stride;
+    src_bot -= src_stride;
+    dst_bot -= dst_stride;
+  }
+  free_aligned_buffer_64(row);
+}
+
+LIBYUV_API
+int ARGBRotate(const uint8* src_argb, int src_stride_argb,
+               uint8* dst_argb, int dst_stride_argb,
+               int width, int height,
+               enum RotationMode mode) {
+  if (!src_argb || width <= 0 || height == 0 || !dst_argb) {
+    return -1;
+  }
+
+  // Negative height means invert the image.
+  if (height < 0) {
+    height = -height;
+    src_argb = src_argb + (height - 1) * src_stride_argb;
+    src_stride_argb = -src_stride_argb;
+  }
+
+  switch (mode) {
+    case kRotate0:
+      // copy frame
+      return ARGBCopy(src_argb, src_stride_argb,
+                      dst_argb, dst_stride_argb,
+                      width, height);
+    case kRotate90:
+      ARGBRotate90(src_argb, src_stride_argb,
+                   dst_argb, dst_stride_argb,
+                   width, height);
+      return 0;
+    case kRotate270:
+      ARGBRotate270(src_argb, src_stride_argb,
+                    dst_argb, dst_stride_argb,
+                    width, height);
+      return 0;
+    case kRotate180:
+      ARGBRotate180(src_argb, src_stride_argb,
+                    dst_argb, dst_stride_argb,
+                    width, height);
+      return 0;
+    default:
+      break;
+  }
+  return -1;
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+}  // namespace libyuv
+#endif
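Reviewer note: the ARGBRotate90 path above describes the rotation as an ARGBTranspose over a source that is read from bottom to top (source pointer moved to the last row, stride negated). As a reading aid, here is a plain scalar sketch of the pixel mapping that path produces. It is not part of the patch, ignores all of the SIMD row helpers, and the name ARGBRotate90Reference is made up for illustration.

#include <stdint.h>
#include <string.h>

// Reference-only scalar equivalent of the 90 degree path: the source pixel
// at (x, y) lands at destination (row = x, column = height - 1 - y), i.e. a
// clockwise quarter turn. dst must be height pixels wide and width rows
// tall; both strides are in bytes.
static void ARGBRotate90Reference(const uint8_t* src, int src_stride,
                                  uint8_t* dst, int dst_stride,
                                  int width, int height) {
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      const uint8_t* s = src + y * src_stride + x * 4;
      uint8_t* d = dst + x * dst_stride + (height - 1 - y) * 4;
      memcpy(d, s, 4);  // One 4-byte ARGB pixel.
    }
  }
}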
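A minimal caller sketch for the new ARGBRotate() entry point follows, assuming the patch is built into libyuv as C++ and that the declaration is available through libyuv/rotate_argb.h. The 8x4 dimensions, the heap buffers, and the gray fill are illustrative values, not anything the patch specifies.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "libyuv/rotate_argb.h"

int main(void) {
  const int src_width = 8;
  const int src_height = 4;
  const int src_stride = src_width * 4;   // 4 bytes per ARGB pixel.
  // A 90 (or 270) degree rotation swaps the dimensions, so the destination
  // is src_height pixels wide and src_width rows tall.
  const int dst_stride = src_height * 4;

  uint8_t* src = (uint8_t*)malloc(src_stride * src_height);
  uint8_t* dst = (uint8_t*)malloc(dst_stride * src_width);
  memset(src, 0x80, src_stride * src_height);  // Arbitrary gray content.

  // Returns 0 on success, -1 on bad arguments or an unknown mode.
  int ret = libyuv::ARGBRotate(src, src_stride,
                               dst, dst_stride,
                               src_width, src_height,
                               libyuv::kRotate90);

  free(src);
  free(dst);
  return ret;
}

As the "Negative height means invert the image." comment in ARGBRotate() notes, passing a negative height flips the source vertically before rotating, matching the convention used elsewhere in libyuv.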