/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/rotate.h"

#include "libyuv/cpu_id.h"
#include "libyuv/convert.h"
#include "libyuv/planar_functions.h"
#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

#if !defined(LIBYUV_DISABLE_X86) && \
    (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__))
#if defined(__APPLE__) && defined(__i386__)
#define DECLARE_FUNCTION(name) \
    ".text \n" \
    ".private_extern _" #name " \n" \
    ".align 4,0x90 \n" \
    "_" #name ": \n"
#elif (defined(__MINGW32__) || defined(__CYGWIN__)) && defined(__i386__)
#define DECLARE_FUNCTION(name) \
    ".text \n" \
    ".align 4,0x90 \n" \
    "_" #name ": \n"
#else
#define DECLARE_FUNCTION(name) \
    ".text \n" \
    ".align 4,0x90 \n" \
    #name ": \n"
#endif
#endif

#if !defined(LIBYUV_DISABLE_NEON) && !defined(__native_client__) && \
    (defined(__ARM_NEON__) || defined(LIBYUV_NEON))
#define HAS_MIRRORROW_NEON
void MirrorRow_NEON(const uint8* src, uint8* dst, int width);
#define HAS_MIRRORROW_UV_NEON
void MirrorUVRow_NEON(const uint8* src, uint8* dst_a, uint8* dst_b, int width);
#define HAS_TRANSPOSE_WX8_NEON
void TransposeWx8_NEON(const uint8* src, int src_stride,
                       uint8* dst, int dst_stride, int width);
#define HAS_TRANSPOSE_UVWX8_NEON
void TransposeUVWx8_NEON(const uint8* src, int src_stride,
                         uint8* dst_a, int dst_stride_a,
                         uint8* dst_b, int dst_stride_b,
                         int width);
#endif  // defined(__ARM_NEON__)

#if !defined(LIBYUV_DISABLE_MIPS) && !defined(__native_client__) && \
    defined(__mips__) && \
    defined(__mips_dsp) && (__mips_dsp_rev >= 2)
#define HAS_TRANSPOSE_WX8_MIPS_DSPR2
void TransposeWx8_MIPS_DSPR2(const uint8* src, int src_stride,
                             uint8* dst, int dst_stride, int width);

void TransposeWx8_FAST_MIPS_DSPR2(const uint8* src, int src_stride,
                                  uint8* dst, int dst_stride, int width);
#define HAS_TRANSPOSE_UVWx8_MIPS_DSPR2
void TransposeUVWx8_MIPS_DSPR2(const uint8* src, int src_stride,
                               uint8* dst_a, int dst_stride_a,
                               uint8* dst_b, int dst_stride_b,
                               int width);
#endif  // defined(__mips__)

#if !defined(LIBYUV_DISABLE_X86) && \
    defined(_M_IX86) && defined(_MSC_VER)
#define HAS_TRANSPOSE_WX8_SSSE3
__declspec(naked) __declspec(align(16))
static void TransposeWx8_SSSE3(const uint8* src, int src_stride,
                               uint8* dst, int dst_stride, int width) {
  __asm {
    push      edi
    push      esi
    push      ebp
    mov       eax, [esp + 12 + 4]   // src
    mov       edi, [esp + 12 + 8]   // src_stride
    mov       edx, [esp + 12 + 12]  // dst
    mov       esi, [esp + 12 + 16]  // dst_stride
    mov       ecx, [esp + 12 + 20]  // width

    // Read in the data from the source pointer.
    // First round of bit swap.
    align      4
 convertloop:
    movq      xmm0, qword ptr [eax]
    lea       ebp, [eax + 8]
    movq      xmm1, qword ptr [eax + edi]
    lea       eax, [eax + 2 * edi]
    punpcklbw xmm0, xmm1
    movq      xmm2, qword ptr [eax]
    movdqa    xmm1, xmm0
    palignr   xmm1, xmm1, 8
    movq      xmm3, qword ptr [eax + edi]
    lea       eax, [eax + 2 * edi]
    punpcklbw xmm2, xmm3
    movdqa    xmm3, xmm2
    movq      xmm4, qword ptr [eax]
    palignr   xmm3, xmm3, 8
    movq      xmm5, qword ptr [eax + edi]
    punpcklbw xmm4, xmm5
    lea       eax, [eax + 2 * edi]
    movdqa    xmm5, xmm4
    movq      xmm6, qword ptr [eax]
    palignr   xmm5, xmm5, 8
    movq      xmm7, qword ptr [eax + edi]
    punpcklbw xmm6, xmm7
    mov       eax, ebp
    movdqa    xmm7, xmm6
    palignr   xmm7, xmm7, 8
    // Second round of bit swap.
    punpcklwd xmm0, xmm2
    punpcklwd xmm1, xmm3
    movdqa    xmm2, xmm0
    movdqa    xmm3, xmm1
    palignr   xmm2, xmm2, 8
    palignr   xmm3, xmm3, 8
    punpcklwd xmm4, xmm6
    punpcklwd xmm5, xmm7
    movdqa    xmm6, xmm4
    movdqa    xmm7, xmm5
    palignr   xmm6, xmm6, 8
    palignr   xmm7, xmm7, 8
    // Third round of bit swap.
    // Write to the destination pointer.
    punpckldq xmm0, xmm4
    movq      qword ptr [edx], xmm0
    movdqa    xmm4, xmm0
    palignr   xmm4, xmm4, 8
    movq      qword ptr [edx + esi], xmm4
    lea       edx, [edx + 2 * esi]
    punpckldq xmm2, xmm6
    movdqa    xmm6, xmm2
    palignr   xmm6, xmm6, 8
    movq      qword ptr [edx], xmm2
    punpckldq xmm1, xmm5
    movq      qword ptr [edx + esi], xmm6
    lea       edx, [edx + 2 * esi]
    movdqa    xmm5, xmm1
    movq      qword ptr [edx], xmm1
    palignr   xmm5, xmm5, 8
    punpckldq xmm3, xmm7
    movq      qword ptr [edx + esi], xmm5
    lea       edx, [edx + 2 * esi]
    movq      qword ptr [edx], xmm3
    movdqa    xmm7, xmm3
    palignr   xmm7, xmm7, 8
    sub       ecx, 8
    movq      qword ptr [edx + esi], xmm7
    lea       edx, [edx + 2 * esi]
    jg        convertloop

    pop       ebp
    pop       esi
    pop       edi
    ret
  }
}

#define HAS_TRANSPOSE_UVWX8_SSE2
__declspec(naked) __declspec(align(16))
static void TransposeUVWx8_SSE2(const uint8* src, int src_stride,
                                uint8* dst_a, int dst_stride_a,
                                uint8* dst_b, int dst_stride_b,
                                int w) {
  __asm {
    push      ebx
    push      esi
    push      edi
    push      ebp
    mov       eax, [esp + 16 + 4]   // src
    mov       edi, [esp + 16 + 8]   // src_stride
    mov       edx, [esp + 16 + 12]  // dst_a
    mov       esi, [esp + 16 + 16]  // dst_stride_a
    mov       ebx, [esp + 16 + 20]  // dst_b
    mov       ebp, [esp + 16 + 24]  // dst_stride_b
    mov       ecx, esp
    sub       esp, 4 + 16
    and       esp, ~15
    mov       [esp + 16], ecx
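    // esp is now 16-byte aligned so xmm registers can be spilled with movdqa.
    // The original esp (still in ecx) was saved at [esp + 16] above; the
    // final argument is read through ecx below.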
    mov       ecx, [ecx + 16 + 28]  // w

    align      4
 convertloop:
    // Read in the data from the source pointer.
    // First round of bit swap.
    movdqa    xmm0, [eax]
    movdqa    xmm1, [eax + edi]
    lea       eax, [eax + 2 * edi]
    movdqa    xmm7, xmm0   // use xmm7 as temp register.
    punpcklbw xmm0, xmm1
    punpckhbw xmm7, xmm1
    movdqa    xmm1, xmm7
    movdqa    xmm2, [eax]
    movdqa    xmm3, [eax + edi]
    lea       eax, [eax + 2 * edi]
    movdqa    xmm7, xmm2
    punpcklbw xmm2, xmm3
    punpckhbw xmm7, xmm3
    movdqa    xmm3, xmm7
    movdqa    xmm4, [eax]
    movdqa    xmm5, [eax + edi]
    lea       eax, [eax + 2 * edi]
    movdqa    xmm7, xmm4
    punpcklbw xmm4, xmm5
    punpckhbw xmm7, xmm5
    movdqa    xmm5, xmm7
    movdqa    xmm6, [eax]
    movdqa    xmm7, [eax + edi]
    lea       eax, [eax + 2 * edi]
    movdqa    [esp], xmm5  // backup xmm5
    neg       edi
    movdqa    xmm5, xmm6   // use xmm5 as temp register.
    punpcklbw xmm6, xmm7
    punpckhbw xmm5, xmm7
    movdqa    xmm7, xmm5
    lea       eax, [eax + 8 * edi + 16]
    neg       edi
    // Second round of bit swap.
    movdqa    xmm5, xmm0
    punpcklwd xmm0, xmm2
    punpckhwd xmm5, xmm2
    movdqa    xmm2, xmm5
    movdqa    xmm5, xmm1
    punpcklwd xmm1, xmm3
    punpckhwd xmm5, xmm3
    movdqa    xmm3, xmm5
    movdqa    xmm5, xmm4
    punpcklwd xmm4, xmm6
    punpckhwd xmm5, xmm6
    movdqa    xmm6, xmm5
    movdqa    xmm5, [esp]  // restore xmm5
    movdqa    [esp], xmm6  // backup xmm6
    movdqa    xmm6, xmm5   // use xmm6 as temp register.
    punpcklwd xmm5, xmm7
    punpckhwd xmm6, xmm7
    movdqa    xmm7, xmm6
    // Third round of bit swap.
    // Write to the destination pointer.
    movdqa    xmm6, xmm0
    punpckldq xmm0, xmm4
    punpckhdq xmm6, xmm4
    movdqa    xmm4, xmm6
    movdqa    xmm6, [esp]  // restore xmm6
    movlpd    qword ptr [edx], xmm0
    movhpd    qword ptr [ebx], xmm0
    movlpd    qword ptr [edx + esi], xmm4
    lea       edx, [edx + 2 * esi]
    movhpd    qword ptr [ebx + ebp], xmm4
    lea       ebx, [ebx + 2 * ebp]
    movdqa    xmm0, xmm2   // use xmm0 as the temp register.
    punpckldq xmm2, xmm6
    movlpd    qword ptr [edx], xmm2
    movhpd    qword ptr [ebx], xmm2
    punpckhdq xmm0, xmm6
    movlpd    qword ptr [edx + esi], xmm0
    lea       edx, [edx + 2 * esi]
    movhpd    qword ptr [ebx + ebp], xmm0
    lea       ebx, [ebx + 2 * ebp]
    movdqa    xmm0, xmm1   // use xmm0 as the temp register.
    punpckldq xmm1, xmm5
    movlpd    qword ptr [edx], xmm1
    movhpd    qword ptr [ebx], xmm1
    punpckhdq xmm0, xmm5
    movlpd    qword ptr [edx + esi], xmm0
    lea       edx, [edx + 2 * esi]
    movhpd    qword ptr [ebx + ebp], xmm0
    lea       ebx, [ebx + 2 * ebp]
    movdqa    xmm0, xmm3   // use xmm0 as the temp register.
    punpckldq xmm3, xmm7
    movlpd    qword ptr [edx], xmm3
    movhpd    qword ptr [ebx], xmm3
    punpckhdq xmm0, xmm7
    sub       ecx, 8
    movlpd    qword ptr [edx + esi], xmm0
    lea       edx, [edx + 2 * esi]
    movhpd    qword ptr [ebx + ebp], xmm0
    lea       ebx, [ebx + 2 * ebp]
    jg        convertloop

    mov       esp, [esp + 16]
    pop       ebp
    pop       edi
    pop       esi
    pop       ebx
    ret
  }
}
#elif !defined(LIBYUV_DISABLE_X86) && \
    (defined(__i386__) || (defined(__x86_64__) && !defined(__native_client__)))
#define HAS_TRANSPOSE_WX8_SSSE3
static void TransposeWx8_SSSE3(const uint8* src, int src_stride,
                               uint8* dst, int dst_stride, int width) {
  asm volatile (
    // Read in the data from the source pointer.
    // First round of bit swap.
    ".p2align  2                               \n"
  "1:                                          \n"
    "movq      (%0),%%xmm0                     \n"
    "movq      (%0,%3),%%xmm1                  \n"
    "lea       (%0,%3,2),%0                    \n"
    "punpcklbw %%xmm1,%%xmm0                   \n"
    "movq      (%0),%%xmm2                     \n"
    "movdqa    %%xmm0,%%xmm1                   \n"
    "palignr   $0x8,%%xmm1,%%xmm1              \n"
    "movq      (%0,%3),%%xmm3                  \n"
    "lea       (%0,%3,2),%0                    \n"
    "punpcklbw %%xmm3,%%xmm2                   \n"
    "movdqa    %%xmm2,%%xmm3                   \n"
    "movq      (%0),%%xmm4                     \n"
    "palignr   $0x8,%%xmm3,%%xmm3              \n"
    "movq      (%0,%3),%%xmm5                  \n"
    "lea       (%0,%3,2),%0                    \n"
    "punpcklbw %%xmm5,%%xmm4                   \n"
    "movdqa    %%xmm4,%%xmm5                   \n"
    "movq      (%0),%%xmm6                     \n"
    "palignr   $0x8,%%xmm5,%%xmm5              \n"
    "movq      (%0,%3),%%xmm7                  \n"
    "lea       (%0,%3,2),%0                    \n"
    "punpcklbw %%xmm7,%%xmm6                   \n"
    "neg       %3                              \n"
    "movdqa    %%xmm6,%%xmm7                   \n"
    "lea       0x8(%0,%3,8),%0                 \n"
    "palignr   $0x8,%%xmm7,%%xmm7              \n"
    "neg       %3                              \n"
    // Second round of bit swap.
    "punpcklwd %%xmm2,%%xmm0                   \n"
    "punpcklwd %%xmm3,%%xmm1                   \n"
    "movdqa    %%xmm0,%%xmm2                   \n"
    "movdqa    %%xmm1,%%xmm3                   \n"
    "palignr   $0x8,%%xmm2,%%xmm2              \n"
    "palignr   $0x8,%%xmm3,%%xmm3              \n"
    "punpcklwd %%xmm6,%%xmm4                   \n"
    "punpcklwd %%xmm7,%%xmm5                   \n"
    "movdqa    %%xmm4,%%xmm6                   \n"
    "movdqa    %%xmm5,%%xmm7                   \n"
    "palignr   $0x8,%%xmm6,%%xmm6              \n"
    "palignr   $0x8,%%xmm7,%%xmm7              \n"
    // Third round of bit swap.
    // Write to the destination pointer.
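    // Each punpckldq below merges two registers into a 16-byte value holding
    // two output rows; movq stores the low row and palignr rotates the high
    // row into the low qword for the next store.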
michael@0: "punpckldq %%xmm4,%%xmm0 \n" michael@0: "movq %%xmm0,(%1) \n" michael@0: "movdqa %%xmm0,%%xmm4 \n" michael@0: "palignr $0x8,%%xmm4,%%xmm4 \n" michael@0: "movq %%xmm4,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "punpckldq %%xmm6,%%xmm2 \n" michael@0: "movdqa %%xmm2,%%xmm6 \n" michael@0: "movq %%xmm2,(%1) \n" michael@0: "palignr $0x8,%%xmm6,%%xmm6 \n" michael@0: "punpckldq %%xmm5,%%xmm1 \n" michael@0: "movq %%xmm6,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "movdqa %%xmm1,%%xmm5 \n" michael@0: "movq %%xmm1,(%1) \n" michael@0: "palignr $0x8,%%xmm5,%%xmm5 \n" michael@0: "movq %%xmm5,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "punpckldq %%xmm7,%%xmm3 \n" michael@0: "movq %%xmm3,(%1) \n" michael@0: "movdqa %%xmm3,%%xmm7 \n" michael@0: "palignr $0x8,%%xmm7,%%xmm7 \n" michael@0: "sub $0x8,%2 \n" michael@0: "movq %%xmm7,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "jg 1b \n" michael@0: : "+r"(src), // %0 michael@0: "+r"(dst), // %1 michael@0: "+r"(width) // %2 michael@0: : "r"((intptr_t)(src_stride)), // %3 michael@0: "r"((intptr_t)(dst_stride)) // %4 michael@0: : "memory", "cc" michael@0: #if defined(__SSE2__) michael@0: , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" michael@0: #endif michael@0: ); michael@0: } michael@0: michael@0: #if !defined(LIBYUV_DISABLE_X86) && defined(__i386__) michael@0: #define HAS_TRANSPOSE_UVWX8_SSE2 michael@0: extern "C" void TransposeUVWx8_SSE2(const uint8* src, int src_stride, michael@0: uint8* dst_a, int dst_stride_a, michael@0: uint8* dst_b, int dst_stride_b, michael@0: int w); michael@0: asm ( michael@0: DECLARE_FUNCTION(TransposeUVWx8_SSE2) michael@0: "push %ebx \n" michael@0: "push %esi \n" michael@0: "push %edi \n" michael@0: "push %ebp \n" michael@0: "mov 0x14(%esp),%eax \n" michael@0: "mov 0x18(%esp),%edi \n" michael@0: "mov 0x1c(%esp),%edx \n" michael@0: "mov 0x20(%esp),%esi \n" michael@0: "mov 0x24(%esp),%ebx \n" michael@0: "mov 0x28(%esp),%ebp \n" michael@0: "mov %esp,%ecx \n" michael@0: "sub $0x14,%esp \n" michael@0: "and $0xfffffff0,%esp \n" michael@0: "mov %ecx,0x10(%esp) \n" michael@0: "mov 0x2c(%ecx),%ecx \n" michael@0: michael@0: "1: \n" michael@0: "movdqa (%eax),%xmm0 \n" michael@0: "movdqa (%eax,%edi,1),%xmm1 \n" michael@0: "lea (%eax,%edi,2),%eax \n" michael@0: "movdqa %xmm0,%xmm7 \n" michael@0: "punpcklbw %xmm1,%xmm0 \n" michael@0: "punpckhbw %xmm1,%xmm7 \n" michael@0: "movdqa %xmm7,%xmm1 \n" michael@0: "movdqa (%eax),%xmm2 \n" michael@0: "movdqa (%eax,%edi,1),%xmm3 \n" michael@0: "lea (%eax,%edi,2),%eax \n" michael@0: "movdqa %xmm2,%xmm7 \n" michael@0: "punpcklbw %xmm3,%xmm2 \n" michael@0: "punpckhbw %xmm3,%xmm7 \n" michael@0: "movdqa %xmm7,%xmm3 \n" michael@0: "movdqa (%eax),%xmm4 \n" michael@0: "movdqa (%eax,%edi,1),%xmm5 \n" michael@0: "lea (%eax,%edi,2),%eax \n" michael@0: "movdqa %xmm4,%xmm7 \n" michael@0: "punpcklbw %xmm5,%xmm4 \n" michael@0: "punpckhbw %xmm5,%xmm7 \n" michael@0: "movdqa %xmm7,%xmm5 \n" michael@0: "movdqa (%eax),%xmm6 \n" michael@0: "movdqa (%eax,%edi,1),%xmm7 \n" michael@0: "lea (%eax,%edi,2),%eax \n" michael@0: "movdqa %xmm5,(%esp) \n" michael@0: "neg %edi \n" michael@0: "movdqa %xmm6,%xmm5 \n" michael@0: "punpcklbw %xmm7,%xmm6 \n" michael@0: "punpckhbw %xmm7,%xmm5 \n" michael@0: "movdqa %xmm5,%xmm7 \n" michael@0: "lea 0x10(%eax,%edi,8),%eax \n" michael@0: "neg %edi \n" michael@0: "movdqa %xmm0,%xmm5 \n" michael@0: "punpcklwd %xmm2,%xmm0 \n" michael@0: "punpckhwd %xmm2,%xmm5 \n" michael@0: "movdqa %xmm5,%xmm2 \n" michael@0: "movdqa %xmm1,%xmm5 
\n" michael@0: "punpcklwd %xmm3,%xmm1 \n" michael@0: "punpckhwd %xmm3,%xmm5 \n" michael@0: "movdqa %xmm5,%xmm3 \n" michael@0: "movdqa %xmm4,%xmm5 \n" michael@0: "punpcklwd %xmm6,%xmm4 \n" michael@0: "punpckhwd %xmm6,%xmm5 \n" michael@0: "movdqa %xmm5,%xmm6 \n" michael@0: "movdqa (%esp),%xmm5 \n" michael@0: "movdqa %xmm6,(%esp) \n" michael@0: "movdqa %xmm5,%xmm6 \n" michael@0: "punpcklwd %xmm7,%xmm5 \n" michael@0: "punpckhwd %xmm7,%xmm6 \n" michael@0: "movdqa %xmm6,%xmm7 \n" michael@0: "movdqa %xmm0,%xmm6 \n" michael@0: "punpckldq %xmm4,%xmm0 \n" michael@0: "punpckhdq %xmm4,%xmm6 \n" michael@0: "movdqa %xmm6,%xmm4 \n" michael@0: "movdqa (%esp),%xmm6 \n" michael@0: "movlpd %xmm0,(%edx) \n" michael@0: "movhpd %xmm0,(%ebx) \n" michael@0: "movlpd %xmm4,(%edx,%esi,1) \n" michael@0: "lea (%edx,%esi,2),%edx \n" michael@0: "movhpd %xmm4,(%ebx,%ebp,1) \n" michael@0: "lea (%ebx,%ebp,2),%ebx \n" michael@0: "movdqa %xmm2,%xmm0 \n" michael@0: "punpckldq %xmm6,%xmm2 \n" michael@0: "movlpd %xmm2,(%edx) \n" michael@0: "movhpd %xmm2,(%ebx) \n" michael@0: "punpckhdq %xmm6,%xmm0 \n" michael@0: "movlpd %xmm0,(%edx,%esi,1) \n" michael@0: "lea (%edx,%esi,2),%edx \n" michael@0: "movhpd %xmm0,(%ebx,%ebp,1) \n" michael@0: "lea (%ebx,%ebp,2),%ebx \n" michael@0: "movdqa %xmm1,%xmm0 \n" michael@0: "punpckldq %xmm5,%xmm1 \n" michael@0: "movlpd %xmm1,(%edx) \n" michael@0: "movhpd %xmm1,(%ebx) \n" michael@0: "punpckhdq %xmm5,%xmm0 \n" michael@0: "movlpd %xmm0,(%edx,%esi,1) \n" michael@0: "lea (%edx,%esi,2),%edx \n" michael@0: "movhpd %xmm0,(%ebx,%ebp,1) \n" michael@0: "lea (%ebx,%ebp,2),%ebx \n" michael@0: "movdqa %xmm3,%xmm0 \n" michael@0: "punpckldq %xmm7,%xmm3 \n" michael@0: "movlpd %xmm3,(%edx) \n" michael@0: "movhpd %xmm3,(%ebx) \n" michael@0: "punpckhdq %xmm7,%xmm0 \n" michael@0: "sub $0x8,%ecx \n" michael@0: "movlpd %xmm0,(%edx,%esi,1) \n" michael@0: "lea (%edx,%esi,2),%edx \n" michael@0: "movhpd %xmm0,(%ebx,%ebp,1) \n" michael@0: "lea (%ebx,%ebp,2),%ebx \n" michael@0: "jg 1b \n" michael@0: "mov 0x10(%esp),%esp \n" michael@0: "pop %ebp \n" michael@0: "pop %edi \n" michael@0: "pop %esi \n" michael@0: "pop %ebx \n" michael@0: #if defined(__native_client__) michael@0: "pop %ecx \n" michael@0: "and $0xffffffe0,%ecx \n" michael@0: "jmp *%ecx \n" michael@0: #else michael@0: "ret \n" michael@0: #endif michael@0: ); michael@0: #elif !defined(LIBYUV_DISABLE_X86) && !defined(__native_client__) && \ michael@0: defined(__x86_64__) michael@0: // 64 bit version has enough registers to do 16x8 to 8x16 at a time. michael@0: #define HAS_TRANSPOSE_WX8_FAST_SSSE3 michael@0: static void TransposeWx8_FAST_SSSE3(const uint8* src, int src_stride, michael@0: uint8* dst, int dst_stride, int width) { michael@0: asm volatile ( michael@0: // Read in the data from the source pointer. michael@0: // First round of bit swap. 
michael@0: ".p2align 2 \n" michael@0: "1: \n" michael@0: "movdqa (%0),%%xmm0 \n" michael@0: "movdqa (%0,%3),%%xmm1 \n" michael@0: "lea (%0,%3,2),%0 \n" michael@0: "movdqa %%xmm0,%%xmm8 \n" michael@0: "punpcklbw %%xmm1,%%xmm0 \n" michael@0: "punpckhbw %%xmm1,%%xmm8 \n" michael@0: "movdqa (%0),%%xmm2 \n" michael@0: "movdqa %%xmm0,%%xmm1 \n" michael@0: "movdqa %%xmm8,%%xmm9 \n" michael@0: "palignr $0x8,%%xmm1,%%xmm1 \n" michael@0: "palignr $0x8,%%xmm9,%%xmm9 \n" michael@0: "movdqa (%0,%3),%%xmm3 \n" michael@0: "lea (%0,%3,2),%0 \n" michael@0: "movdqa %%xmm2,%%xmm10 \n" michael@0: "punpcklbw %%xmm3,%%xmm2 \n" michael@0: "punpckhbw %%xmm3,%%xmm10 \n" michael@0: "movdqa %%xmm2,%%xmm3 \n" michael@0: "movdqa %%xmm10,%%xmm11 \n" michael@0: "movdqa (%0),%%xmm4 \n" michael@0: "palignr $0x8,%%xmm3,%%xmm3 \n" michael@0: "palignr $0x8,%%xmm11,%%xmm11 \n" michael@0: "movdqa (%0,%3),%%xmm5 \n" michael@0: "lea (%0,%3,2),%0 \n" michael@0: "movdqa %%xmm4,%%xmm12 \n" michael@0: "punpcklbw %%xmm5,%%xmm4 \n" michael@0: "punpckhbw %%xmm5,%%xmm12 \n" michael@0: "movdqa %%xmm4,%%xmm5 \n" michael@0: "movdqa %%xmm12,%%xmm13 \n" michael@0: "movdqa (%0),%%xmm6 \n" michael@0: "palignr $0x8,%%xmm5,%%xmm5 \n" michael@0: "palignr $0x8,%%xmm13,%%xmm13 \n" michael@0: "movdqa (%0,%3),%%xmm7 \n" michael@0: "lea (%0,%3,2),%0 \n" michael@0: "movdqa %%xmm6,%%xmm14 \n" michael@0: "punpcklbw %%xmm7,%%xmm6 \n" michael@0: "punpckhbw %%xmm7,%%xmm14 \n" michael@0: "neg %3 \n" michael@0: "movdqa %%xmm6,%%xmm7 \n" michael@0: "movdqa %%xmm14,%%xmm15 \n" michael@0: "lea 0x10(%0,%3,8),%0 \n" michael@0: "palignr $0x8,%%xmm7,%%xmm7 \n" michael@0: "palignr $0x8,%%xmm15,%%xmm15 \n" michael@0: "neg %3 \n" michael@0: // Second round of bit swap. michael@0: "punpcklwd %%xmm2,%%xmm0 \n" michael@0: "punpcklwd %%xmm3,%%xmm1 \n" michael@0: "movdqa %%xmm0,%%xmm2 \n" michael@0: "movdqa %%xmm1,%%xmm3 \n" michael@0: "palignr $0x8,%%xmm2,%%xmm2 \n" michael@0: "palignr $0x8,%%xmm3,%%xmm3 \n" michael@0: "punpcklwd %%xmm6,%%xmm4 \n" michael@0: "punpcklwd %%xmm7,%%xmm5 \n" michael@0: "movdqa %%xmm4,%%xmm6 \n" michael@0: "movdqa %%xmm5,%%xmm7 \n" michael@0: "palignr $0x8,%%xmm6,%%xmm6 \n" michael@0: "palignr $0x8,%%xmm7,%%xmm7 \n" michael@0: "punpcklwd %%xmm10,%%xmm8 \n" michael@0: "punpcklwd %%xmm11,%%xmm9 \n" michael@0: "movdqa %%xmm8,%%xmm10 \n" michael@0: "movdqa %%xmm9,%%xmm11 \n" michael@0: "palignr $0x8,%%xmm10,%%xmm10 \n" michael@0: "palignr $0x8,%%xmm11,%%xmm11 \n" michael@0: "punpcklwd %%xmm14,%%xmm12 \n" michael@0: "punpcklwd %%xmm15,%%xmm13 \n" michael@0: "movdqa %%xmm12,%%xmm14 \n" michael@0: "movdqa %%xmm13,%%xmm15 \n" michael@0: "palignr $0x8,%%xmm14,%%xmm14 \n" michael@0: "palignr $0x8,%%xmm15,%%xmm15 \n" michael@0: // Third round of bit swap. michael@0: // Write to the destination pointer. 
michael@0: "punpckldq %%xmm4,%%xmm0 \n" michael@0: "movq %%xmm0,(%1) \n" michael@0: "movdqa %%xmm0,%%xmm4 \n" michael@0: "palignr $0x8,%%xmm4,%%xmm4 \n" michael@0: "movq %%xmm4,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "punpckldq %%xmm6,%%xmm2 \n" michael@0: "movdqa %%xmm2,%%xmm6 \n" michael@0: "movq %%xmm2,(%1) \n" michael@0: "palignr $0x8,%%xmm6,%%xmm6 \n" michael@0: "punpckldq %%xmm5,%%xmm1 \n" michael@0: "movq %%xmm6,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "movdqa %%xmm1,%%xmm5 \n" michael@0: "movq %%xmm1,(%1) \n" michael@0: "palignr $0x8,%%xmm5,%%xmm5 \n" michael@0: "movq %%xmm5,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "punpckldq %%xmm7,%%xmm3 \n" michael@0: "movq %%xmm3,(%1) \n" michael@0: "movdqa %%xmm3,%%xmm7 \n" michael@0: "palignr $0x8,%%xmm7,%%xmm7 \n" michael@0: "movq %%xmm7,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "punpckldq %%xmm12,%%xmm8 \n" michael@0: "movq %%xmm8,(%1) \n" michael@0: "movdqa %%xmm8,%%xmm12 \n" michael@0: "palignr $0x8,%%xmm12,%%xmm12 \n" michael@0: "movq %%xmm12,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "punpckldq %%xmm14,%%xmm10 \n" michael@0: "movdqa %%xmm10,%%xmm14 \n" michael@0: "movq %%xmm10,(%1) \n" michael@0: "palignr $0x8,%%xmm14,%%xmm14 \n" michael@0: "punpckldq %%xmm13,%%xmm9 \n" michael@0: "movq %%xmm14,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "movdqa %%xmm9,%%xmm13 \n" michael@0: "movq %%xmm9,(%1) \n" michael@0: "palignr $0x8,%%xmm13,%%xmm13 \n" michael@0: "movq %%xmm13,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "punpckldq %%xmm15,%%xmm11 \n" michael@0: "movq %%xmm11,(%1) \n" michael@0: "movdqa %%xmm11,%%xmm15 \n" michael@0: "palignr $0x8,%%xmm15,%%xmm15 \n" michael@0: "sub $0x10,%2 \n" michael@0: "movq %%xmm15,(%1,%4) \n" michael@0: "lea (%1,%4,2),%1 \n" michael@0: "jg 1b \n" michael@0: : "+r"(src), // %0 michael@0: "+r"(dst), // %1 michael@0: "+r"(width) // %2 michael@0: : "r"((intptr_t)(src_stride)), // %3 michael@0: "r"((intptr_t)(dst_stride)) // %4 michael@0: : "memory", "cc", michael@0: "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", michael@0: "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" michael@0: ); michael@0: } michael@0: michael@0: #define HAS_TRANSPOSE_UVWX8_SSE2 michael@0: static void TransposeUVWx8_SSE2(const uint8* src, int src_stride, michael@0: uint8* dst_a, int dst_stride_a, michael@0: uint8* dst_b, int dst_stride_b, michael@0: int w) { michael@0: asm volatile ( michael@0: // Read in the data from the source pointer. michael@0: // First round of bit swap. 
michael@0: ".p2align 2 \n" michael@0: "1: \n" michael@0: "movdqa (%0),%%xmm0 \n" michael@0: "movdqa (%0,%4),%%xmm1 \n" michael@0: "lea (%0,%4,2),%0 \n" michael@0: "movdqa %%xmm0,%%xmm8 \n" michael@0: "punpcklbw %%xmm1,%%xmm0 \n" michael@0: "punpckhbw %%xmm1,%%xmm8 \n" michael@0: "movdqa %%xmm8,%%xmm1 \n" michael@0: "movdqa (%0),%%xmm2 \n" michael@0: "movdqa (%0,%4),%%xmm3 \n" michael@0: "lea (%0,%4,2),%0 \n" michael@0: "movdqa %%xmm2,%%xmm8 \n" michael@0: "punpcklbw %%xmm3,%%xmm2 \n" michael@0: "punpckhbw %%xmm3,%%xmm8 \n" michael@0: "movdqa %%xmm8,%%xmm3 \n" michael@0: "movdqa (%0),%%xmm4 \n" michael@0: "movdqa (%0,%4),%%xmm5 \n" michael@0: "lea (%0,%4,2),%0 \n" michael@0: "movdqa %%xmm4,%%xmm8 \n" michael@0: "punpcklbw %%xmm5,%%xmm4 \n" michael@0: "punpckhbw %%xmm5,%%xmm8 \n" michael@0: "movdqa %%xmm8,%%xmm5 \n" michael@0: "movdqa (%0),%%xmm6 \n" michael@0: "movdqa (%0,%4),%%xmm7 \n" michael@0: "lea (%0,%4,2),%0 \n" michael@0: "movdqa %%xmm6,%%xmm8 \n" michael@0: "punpcklbw %%xmm7,%%xmm6 \n" michael@0: "neg %4 \n" michael@0: "lea 0x10(%0,%4,8),%0 \n" michael@0: "punpckhbw %%xmm7,%%xmm8 \n" michael@0: "movdqa %%xmm8,%%xmm7 \n" michael@0: "neg %4 \n" michael@0: // Second round of bit swap. michael@0: "movdqa %%xmm0,%%xmm8 \n" michael@0: "movdqa %%xmm1,%%xmm9 \n" michael@0: "punpckhwd %%xmm2,%%xmm8 \n" michael@0: "punpckhwd %%xmm3,%%xmm9 \n" michael@0: "punpcklwd %%xmm2,%%xmm0 \n" michael@0: "punpcklwd %%xmm3,%%xmm1 \n" michael@0: "movdqa %%xmm8,%%xmm2 \n" michael@0: "movdqa %%xmm9,%%xmm3 \n" michael@0: "movdqa %%xmm4,%%xmm8 \n" michael@0: "movdqa %%xmm5,%%xmm9 \n" michael@0: "punpckhwd %%xmm6,%%xmm8 \n" michael@0: "punpckhwd %%xmm7,%%xmm9 \n" michael@0: "punpcklwd %%xmm6,%%xmm4 \n" michael@0: "punpcklwd %%xmm7,%%xmm5 \n" michael@0: "movdqa %%xmm8,%%xmm6 \n" michael@0: "movdqa %%xmm9,%%xmm7 \n" michael@0: // Third round of bit swap. michael@0: // Write to the destination pointer. 
michael@0: "movdqa %%xmm0,%%xmm8 \n" michael@0: "punpckldq %%xmm4,%%xmm0 \n" michael@0: "movlpd %%xmm0,(%1) \n" // Write back U channel michael@0: "movhpd %%xmm0,(%2) \n" // Write back V channel michael@0: "punpckhdq %%xmm4,%%xmm8 \n" michael@0: "movlpd %%xmm8,(%1,%5) \n" michael@0: "lea (%1,%5,2),%1 \n" michael@0: "movhpd %%xmm8,(%2,%6) \n" michael@0: "lea (%2,%6,2),%2 \n" michael@0: "movdqa %%xmm2,%%xmm8 \n" michael@0: "punpckldq %%xmm6,%%xmm2 \n" michael@0: "movlpd %%xmm2,(%1) \n" michael@0: "movhpd %%xmm2,(%2) \n" michael@0: "punpckhdq %%xmm6,%%xmm8 \n" michael@0: "movlpd %%xmm8,(%1,%5) \n" michael@0: "lea (%1,%5,2),%1 \n" michael@0: "movhpd %%xmm8,(%2,%6) \n" michael@0: "lea (%2,%6,2),%2 \n" michael@0: "movdqa %%xmm1,%%xmm8 \n" michael@0: "punpckldq %%xmm5,%%xmm1 \n" michael@0: "movlpd %%xmm1,(%1) \n" michael@0: "movhpd %%xmm1,(%2) \n" michael@0: "punpckhdq %%xmm5,%%xmm8 \n" michael@0: "movlpd %%xmm8,(%1,%5) \n" michael@0: "lea (%1,%5,2),%1 \n" michael@0: "movhpd %%xmm8,(%2,%6) \n" michael@0: "lea (%2,%6,2),%2 \n" michael@0: "movdqa %%xmm3,%%xmm8 \n" michael@0: "punpckldq %%xmm7,%%xmm3 \n" michael@0: "movlpd %%xmm3,(%1) \n" michael@0: "movhpd %%xmm3,(%2) \n" michael@0: "punpckhdq %%xmm7,%%xmm8 \n" michael@0: "sub $0x8,%3 \n" michael@0: "movlpd %%xmm8,(%1,%5) \n" michael@0: "lea (%1,%5,2),%1 \n" michael@0: "movhpd %%xmm8,(%2,%6) \n" michael@0: "lea (%2,%6,2),%2 \n" michael@0: "jg 1b \n" michael@0: : "+r"(src), // %0 michael@0: "+r"(dst_a), // %1 michael@0: "+r"(dst_b), // %2 michael@0: "+r"(w) // %3 michael@0: : "r"((intptr_t)(src_stride)), // %4 michael@0: "r"((intptr_t)(dst_stride_a)), // %5 michael@0: "r"((intptr_t)(dst_stride_b)) // %6 michael@0: : "memory", "cc", michael@0: "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", michael@0: "xmm8", "xmm9" michael@0: ); michael@0: } michael@0: #endif michael@0: #endif michael@0: michael@0: static void TransposeWx8_C(const uint8* src, int src_stride, michael@0: uint8* dst, int dst_stride, michael@0: int width) { michael@0: int i; michael@0: for (i = 0; i < width; ++i) { michael@0: dst[0] = src[0 * src_stride]; michael@0: dst[1] = src[1 * src_stride]; michael@0: dst[2] = src[2 * src_stride]; michael@0: dst[3] = src[3 * src_stride]; michael@0: dst[4] = src[4 * src_stride]; michael@0: dst[5] = src[5 * src_stride]; michael@0: dst[6] = src[6 * src_stride]; michael@0: dst[7] = src[7 * src_stride]; michael@0: ++src; michael@0: dst += dst_stride; michael@0: } michael@0: } michael@0: michael@0: static void TransposeWxH_C(const uint8* src, int src_stride, michael@0: uint8* dst, int dst_stride, michael@0: int width, int height) { michael@0: int i; michael@0: for (i = 0; i < width; ++i) { michael@0: int j; michael@0: for (j = 0; j < height; ++j) { michael@0: dst[i * dst_stride + j] = src[j * src_stride + i]; michael@0: } michael@0: } michael@0: } michael@0: michael@0: LIBYUV_API michael@0: void TransposePlane(const uint8* src, int src_stride, michael@0: uint8* dst, int dst_stride, michael@0: int width, int height) { michael@0: int i = height; michael@0: void (*TransposeWx8)(const uint8* src, int src_stride, michael@0: uint8* dst, int dst_stride, michael@0: int width) = TransposeWx8_C; michael@0: #if defined(HAS_TRANSPOSE_WX8_NEON) michael@0: if (TestCpuFlag(kCpuHasNEON)) { michael@0: TransposeWx8 = TransposeWx8_NEON; michael@0: } michael@0: #endif michael@0: #if defined(HAS_TRANSPOSE_WX8_SSSE3) michael@0: if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 8)) { michael@0: TransposeWx8 = TransposeWx8_SSSE3; michael@0: } michael@0: #endif 
#if defined(HAS_TRANSPOSE_WX8_FAST_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) &&
      IS_ALIGNED(width, 16) &&
      IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16)) {
    TransposeWx8 = TransposeWx8_FAST_SSSE3;
  }
#endif
#if defined(HAS_TRANSPOSE_WX8_MIPS_DSPR2)
  if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
    if (IS_ALIGNED(width, 4) &&
        IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) {
      TransposeWx8 = TransposeWx8_FAST_MIPS_DSPR2;
    } else {
      TransposeWx8 = TransposeWx8_MIPS_DSPR2;
    }
  }
#endif

  // Work across the source in 8x8 tiles
  while (i >= 8) {
    TransposeWx8(src, src_stride, dst, dst_stride, width);
    src += 8 * src_stride;    // Go down 8 rows.
    dst += 8;                 // Move over 8 columns.
    i -= 8;
  }

  TransposeWxH_C(src, src_stride, dst, dst_stride, width, i);
}

LIBYUV_API
void RotatePlane90(const uint8* src, int src_stride,
                   uint8* dst, int dst_stride,
                   int width, int height) {
  // Rotate by 90 is a transpose with the source read
  // from bottom to top. So set the source pointer to the end
  // of the buffer and flip the sign of the source stride.
  src += src_stride * (height - 1);
  src_stride = -src_stride;
  TransposePlane(src, src_stride, dst, dst_stride, width, height);
}

LIBYUV_API
void RotatePlane270(const uint8* src, int src_stride,
                    uint8* dst, int dst_stride,
                    int width, int height) {
  // Rotate by 270 is a transpose with the destination written
  // from bottom to top. So set the destination pointer to the end
  // of the buffer and flip the sign of the destination stride.
  dst += dst_stride * (width - 1);
  dst_stride = -dst_stride;
  TransposePlane(src, src_stride, dst, dst_stride, width, height);
}

LIBYUV_API
void RotatePlane180(const uint8* src, int src_stride,
                    uint8* dst, int dst_stride,
                    int width, int height) {
  // Swap first and last row and mirror the content. Uses a temporary row.
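  // align_buffer_64 provides a 64-byte-aligned scratch row so the SIMD
  // mirror/copy kernels selected below can use aligned accesses.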
  align_buffer_64(row, width);
  const uint8* src_bot = src + src_stride * (height - 1);
  uint8* dst_bot = dst + dst_stride * (height - 1);
  int half_height = (height + 1) >> 1;
  int y;
  void (*MirrorRow)(const uint8* src, uint8* dst, int width) = MirrorRow_C;
  void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C;
#if defined(HAS_MIRRORROW_NEON)
  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 16)) {
    MirrorRow = MirrorRow_NEON;
  }
#endif
#if defined(HAS_MIRRORROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 16) &&
      IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16) &&
      IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride, 16)) {
    MirrorRow = MirrorRow_SSE2;
  }
#endif
#if defined(HAS_MIRRORROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16) &&
      IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16) &&
      IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride, 16)) {
    MirrorRow = MirrorRow_SSSE3;
  }
#endif
#if defined(HAS_MIRRORROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2) && IS_ALIGNED(width, 32)) {
    MirrorRow = MirrorRow_AVX2;
  }
#endif
#if defined(HAS_MIRRORROW_MIPS_DSPR2)
  if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
      IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4) &&
      IS_ALIGNED(dst, 4) && IS_ALIGNED(dst_stride, 4)) {
    MirrorRow = MirrorRow_MIPS_DSPR2;
  }
#endif
#if defined(HAS_COPYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 32)) {
    CopyRow = CopyRow_NEON;
  }
#endif
#if defined(HAS_COPYROW_X86)
  if (TestCpuFlag(kCpuHasX86) && IS_ALIGNED(width, 4)) {
    CopyRow = CopyRow_X86;
  }
#endif
#if defined(HAS_COPYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 32) &&
      IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16) &&
      IS_ALIGNED(dst, 16) && IS_ALIGNED(dst_stride, 16)) {
    CopyRow = CopyRow_SSE2;
  }
#endif
#if defined(HAS_COPYROW_ERMS)
  if (TestCpuFlag(kCpuHasERMS)) {
    CopyRow = CopyRow_ERMS;
  }
#endif
#if defined(HAS_COPYROW_MIPS)
  if (TestCpuFlag(kCpuHasMIPS)) {
    CopyRow = CopyRow_MIPS;
  }
#endif

  // Odd height will harmlessly mirror the middle row twice.
  for (y = 0; y < half_height; ++y) {
    MirrorRow(src, row, width);      // Mirror first row into a buffer
    src += src_stride;
    MirrorRow(src_bot, dst, width);  // Mirror last row into first row
    dst += dst_stride;
    CopyRow(row, dst_bot, width);    // Copy first mirrored row into last
    src_bot -= src_stride;
    dst_bot -= dst_stride;
  }
  free_aligned_buffer_64(row);
}

static void TransposeUVWx8_C(const uint8* src, int src_stride,
                             uint8* dst_a, int dst_stride_a,
                             uint8* dst_b, int dst_stride_b,
                             int width) {
  int i;
  for (i = 0; i < width; ++i) {
    dst_a[0] = src[0 * src_stride + 0];
    dst_b[0] = src[0 * src_stride + 1];
    dst_a[1] = src[1 * src_stride + 0];
    dst_b[1] = src[1 * src_stride + 1];
    dst_a[2] = src[2 * src_stride + 0];
    dst_b[2] = src[2 * src_stride + 1];
    dst_a[3] = src[3 * src_stride + 0];
    dst_b[3] = src[3 * src_stride + 1];
    dst_a[4] = src[4 * src_stride + 0];
    dst_b[4] = src[4 * src_stride + 1];
    dst_a[5] = src[5 * src_stride + 0];
    dst_b[5] = src[5 * src_stride + 1];
    dst_a[6] = src[6 * src_stride + 0];
    dst_b[6] = src[6 * src_stride + 1];
    dst_a[7] = src[7 * src_stride + 0];
    dst_b[7] = src[7 * src_stride + 1];
    src += 2;
    dst_a += dst_stride_a;
    dst_b += dst_stride_b;
  }
}

static void TransposeUVWxH_C(const uint8* src, int src_stride,
                             uint8* dst_a, int dst_stride_a,
                             uint8* dst_b, int dst_stride_b,
                             int width, int height) {
  int i;
  for (i = 0; i < width * 2; i += 2) {
    int j;
    for (j = 0; j < height; ++j) {
      dst_a[j + ((i >> 1) * dst_stride_a)] = src[i + (j * src_stride)];
      dst_b[j + ((i >> 1) * dst_stride_b)] = src[i + (j * src_stride) + 1];
    }
  }
}

LIBYUV_API
void TransposeUV(const uint8* src, int src_stride,
                 uint8* dst_a, int dst_stride_a,
                 uint8* dst_b, int dst_stride_b,
                 int width, int height) {
  int i = height;
  void (*TransposeUVWx8)(const uint8* src, int src_stride,
                         uint8* dst_a, int dst_stride_a,
                         uint8* dst_b, int dst_stride_b,
                         int width) = TransposeUVWx8_C;
#if defined(HAS_TRANSPOSE_UVWX8_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    TransposeUVWx8 = TransposeUVWx8_NEON;
  }
#elif defined(HAS_TRANSPOSE_UVWX8_SSE2)
  if (TestCpuFlag(kCpuHasSSE2) &&
      IS_ALIGNED(width, 8) &&
      IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16)) {
    TransposeUVWx8 = TransposeUVWx8_SSE2;
  }
#elif defined(HAS_TRANSPOSE_UVWx8_MIPS_DSPR2)
  if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 2) &&
      IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) {
    TransposeUVWx8 = TransposeUVWx8_MIPS_DSPR2;
  }
#endif

  // Work through the source in 8x8 tiles.
  while (i >= 8) {
    TransposeUVWx8(src, src_stride,
                   dst_a, dst_stride_a,
                   dst_b, dst_stride_b,
                   width);
    src += 8 * src_stride;    // Go down 8 rows.
    dst_a += 8;               // Move over 8 columns.
    dst_b += 8;               // Move over 8 columns.
    i -= 8;
  }

  TransposeUVWxH_C(src, src_stride,
                   dst_a, dst_stride_a,
                   dst_b, dst_stride_b,
                   width, i);
}

LIBYUV_API
void RotateUV90(const uint8* src, int src_stride,
                uint8* dst_a, int dst_stride_a,
                uint8* dst_b, int dst_stride_b,
                int width, int height) {
  src += src_stride * (height - 1);
  src_stride = -src_stride;

  TransposeUV(src, src_stride,
              dst_a, dst_stride_a,
              dst_b, dst_stride_b,
              width, height);
}

LIBYUV_API
void RotateUV270(const uint8* src, int src_stride,
                 uint8* dst_a, int dst_stride_a,
                 uint8* dst_b, int dst_stride_b,
                 int width, int height) {
  dst_a += dst_stride_a * (width - 1);
  dst_b += dst_stride_b * (width - 1);
  dst_stride_a = -dst_stride_a;
  dst_stride_b = -dst_stride_b;

  TransposeUV(src, src_stride,
              dst_a, dst_stride_a,
              dst_b, dst_stride_b,
              width, height);
}

// Rotate 180 is a horizontal and vertical flip.
LIBYUV_API
void RotateUV180(const uint8* src, int src_stride,
                 uint8* dst_a, int dst_stride_a,
                 uint8* dst_b, int dst_stride_b,
                 int width, int height) {
  int i;
  void (*MirrorRowUV)(const uint8* src, uint8* dst_u, uint8* dst_v,
                      int width) = MirrorUVRow_C;
#if defined(HAS_MIRRORUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
    MirrorRowUV = MirrorUVRow_NEON;
  }
#elif defined(HAS_MIRRORROW_UV_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16) &&
      IS_ALIGNED(src, 16) && IS_ALIGNED(src_stride, 16)) {
    MirrorRowUV = MirrorUVRow_SSSE3;
  }
#elif defined(HAS_MIRRORUVROW_MIPS_DSPR2)
  if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
      IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) {
    MirrorRowUV = MirrorUVRow_MIPS_DSPR2;
  }
#endif

  dst_a += dst_stride_a * (height - 1);
  dst_b += dst_stride_b * (height - 1);

  for (i = 0; i < height; ++i) {
    MirrorRowUV(src, dst_a, dst_b, width);
    src += src_stride;
    dst_a -= dst_stride_a;
    dst_b -= dst_stride_b;
  }
}

LIBYUV_API
int RotatePlane(const uint8* src, int src_stride,
                uint8* dst, int dst_stride,
                int width, int height,
                enum RotationMode mode) {
  if (!src || width <= 0 || height == 0 || !dst) {
    return -1;
  }

  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src = src + (height - 1) * src_stride;
    src_stride = -src_stride;
  }

  switch (mode) {
    case kRotate0:
      // copy frame
      CopyPlane(src, src_stride,
                dst, dst_stride,
                width, height);
      return 0;
    case kRotate90:
      RotatePlane90(src, src_stride,
                    dst, dst_stride,
                    width, height);
      return 0;
    case kRotate270:
      RotatePlane270(src, src_stride,
                     dst, dst_stride,
                     width, height);
      return 0;
    case kRotate180:
      RotatePlane180(src, src_stride,
                     dst, dst_stride,
                     width, height);
      return 0;
    default:
      break;
  }
  return -1;
}

LIBYUV_API
int I420Rotate(const uint8* src_y, int src_stride_y,
               const uint8* src_u, int src_stride_u,
               const uint8* src_v, int src_stride_v,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height,
               enum RotationMode mode) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (!src_y || !src_u || !src_v || width <= 0 || height == 0 ||
      !dst_y || !dst_u || !dst_v) {
    return -1;
  }

  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_u = src_u + (halfheight - 1) * src_stride_u;
    src_v = src_v + (halfheight - 1) * src_stride_v;
    src_stride_y = -src_stride_y;
    src_stride_u = -src_stride_u;
    src_stride_v = -src_stride_v;
  }

  switch (mode) {
    case kRotate0:
      // copy frame
      return I420Copy(src_y, src_stride_y,
                      src_u, src_stride_u,
                      src_v, src_stride_v,
                      dst_y, dst_stride_y,
                      dst_u, dst_stride_u,
                      dst_v, dst_stride_v,
                      width, height);
    case kRotate90:
      RotatePlane90(src_y, src_stride_y,
                    dst_y, dst_stride_y,
                    width, height);
      RotatePlane90(src_u, src_stride_u,
                    dst_u, dst_stride_u,
                    halfwidth, halfheight);
      RotatePlane90(src_v, src_stride_v,
                    dst_v, dst_stride_v,
                    halfwidth, halfheight);
      return 0;
    case kRotate270:
      RotatePlane270(src_y, src_stride_y,
                     dst_y, dst_stride_y,
                     width, height);
      RotatePlane270(src_u, src_stride_u,
                     dst_u, dst_stride_u,
                     halfwidth, halfheight);
      RotatePlane270(src_v, src_stride_v,
                     dst_v, dst_stride_v,
                     halfwidth, halfheight);
      return 0;
    case kRotate180:
      RotatePlane180(src_y, src_stride_y,
                     dst_y, dst_stride_y,
                     width, height);
      RotatePlane180(src_u, src_stride_u,
                     dst_u, dst_stride_u,
                     halfwidth, halfheight);
      RotatePlane180(src_v, src_stride_v,
                     dst_v, dst_stride_v,
                     halfwidth, halfheight);
      return 0;
    default:
      break;
  }
  return -1;
}

LIBYUV_API
int NV12ToI420Rotate(const uint8* src_y, int src_stride_y,
                     const uint8* src_uv, int src_stride_uv,
                     uint8* dst_y, int dst_stride_y,
                     uint8* dst_u, int dst_stride_u,
                     uint8* dst_v, int dst_stride_v,
                     int width, int height,
                     enum RotationMode mode) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  if (!src_y || !src_uv || width <= 0 || height == 0 ||
      !dst_y || !dst_u || !dst_v) {
    return -1;
  }

  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    halfheight = (height + 1) >> 1;
    src_y = src_y + (height - 1) * src_stride_y;
    src_uv = src_uv + (halfheight - 1) * src_stride_uv;
    src_stride_y = -src_stride_y;
    src_stride_uv = -src_stride_uv;
  }

  switch (mode) {
    case kRotate0:
      // copy frame
      return NV12ToI420(src_y, src_stride_y,
                        src_uv, src_stride_uv,
                        dst_y, dst_stride_y,
                        dst_u, dst_stride_u,
                        dst_v, dst_stride_v,
                        width, height);
    case kRotate90:
      RotatePlane90(src_y, src_stride_y,
                    dst_y, dst_stride_y,
                    width, height);
      RotateUV90(src_uv, src_stride_uv,
                 dst_u, dst_stride_u,
                 dst_v, dst_stride_v,
                 halfwidth, halfheight);
      return 0;
    case kRotate270:
      RotatePlane270(src_y, src_stride_y,
                     dst_y, dst_stride_y,
                     width, height);
      RotateUV270(src_uv, src_stride_uv,
                  dst_u, dst_stride_u,
                  dst_v, dst_stride_v,
                  halfwidth, halfheight);
      return 0;
    case kRotate180:
      RotatePlane180(src_y, src_stride_y,
                     dst_y, dst_stride_y,
                     width, height);
      RotateUV180(src_uv, src_stride_uv,
                  dst_u, dst_stride_u,
                  dst_v, dst_stride_v,
                  halfwidth, halfheight);
      return 0;
    default:
      break;
  }
  return -1;
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif