/*
 *  Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/row.h"

#include "libyuv/basic_types.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__)
static uvec8 kVTbl4x4Transpose =
  { 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15 };

void TransposeWx8_NEON(const uint8* src, int src_stride,
                       uint8* dst, int dst_stride,
                       int width) {
  asm volatile (
    // The loop works on blocks of 8 columns and stops when the counter
    // goes to or below 0. Starting the counter at w-8 allows for this.
    "sub         %4, #8                        \n"

    // Handle 8x8 blocks. This should be the majority of the plane.
    ".p2align  2                               \n"
    "1:                                        \n"
    "mov         r9, %0                        \n"

    "vld1.8      {d0}, [r9], %1                \n"
    "vld1.8      {d1}, [r9], %1                \n"
    "vld1.8      {d2}, [r9], %1                \n"
    "vld1.8      {d3}, [r9], %1                \n"
    "vld1.8      {d4}, [r9], %1                \n"
    "vld1.8      {d5}, [r9], %1                \n"
    "vld1.8      {d6}, [r9], %1                \n"
    "vld1.8      {d7}, [r9]                    \n"

    "vtrn.8      d1, d0                        \n"
    "vtrn.8      d3, d2                        \n"
    "vtrn.8      d5, d4                        \n"
    "vtrn.8      d7, d6                        \n"

    "vtrn.16     d1, d3                        \n"
    "vtrn.16     d0, d2                        \n"
    "vtrn.16     d5, d7                        \n"
    "vtrn.16     d4, d6                        \n"

    "vtrn.32     d1, d5                        \n"
    "vtrn.32     d0, d4                        \n"
    "vtrn.32     d3, d7                        \n"
    "vtrn.32     d2, d6                        \n"

    "vrev16.8    q0, q0                        \n"
    "vrev16.8    q1, q1                        \n"
    "vrev16.8    q2, q2                        \n"
    "vrev16.8    q3, q3                        \n"

    "mov         r9, %2                        \n"

    "vst1.8      {d1}, [r9], %3                \n"
    "vst1.8      {d0}, [r9], %3                \n"
    "vst1.8      {d3}, [r9], %3                \n"
    "vst1.8      {d2}, [r9], %3                \n"
    "vst1.8      {d5}, [r9], %3                \n"
    "vst1.8      {d4}, [r9], %3                \n"
    "vst1.8      {d7}, [r9], %3                \n"
    "vst1.8      {d6}, [r9]                    \n"

    "add         %0, #8                        \n"  // src += 8
    "add         %2, %2, %3, lsl #3            \n"  // dst += 8 * dst_stride
    "subs        %4, #8                        \n"  // w   -= 8
    "bge         1b                            \n"

    // Add 8 back to the counter. If the result is 0 there are
    // no residuals.
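    // Residuals are handled largest-first: a 4x8 block transposed with
    // the kVTbl4x4Transpose shuffle, then a 2x8 block with vtrn.8, then
    // a final single column one byte at a time.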
michael@0: "adds %4, #8 \n" michael@0: "beq 4f \n" michael@0: michael@0: // some residual, so between 1 and 7 lines left to transpose michael@0: "cmp %4, #2 \n" michael@0: "blt 3f \n" michael@0: michael@0: "cmp %4, #4 \n" michael@0: "blt 2f \n" michael@0: michael@0: // 4x8 block michael@0: "mov r9, %0 \n" michael@0: "vld1.32 {d0[0]}, [r9], %1 \n" michael@0: "vld1.32 {d0[1]}, [r9], %1 \n" michael@0: "vld1.32 {d1[0]}, [r9], %1 \n" michael@0: "vld1.32 {d1[1]}, [r9], %1 \n" michael@0: "vld1.32 {d2[0]}, [r9], %1 \n" michael@0: "vld1.32 {d2[1]}, [r9], %1 \n" michael@0: "vld1.32 {d3[0]}, [r9], %1 \n" michael@0: "vld1.32 {d3[1]}, [r9] \n" michael@0: michael@0: "mov r9, %2 \n" michael@0: michael@0: "vld1.8 {q3}, [%5] \n" michael@0: michael@0: "vtbl.8 d4, {d0, d1}, d6 \n" michael@0: "vtbl.8 d5, {d0, d1}, d7 \n" michael@0: "vtbl.8 d0, {d2, d3}, d6 \n" michael@0: "vtbl.8 d1, {d2, d3}, d7 \n" michael@0: michael@0: // TODO(frkoenig): Rework shuffle above to michael@0: // write out with 4 instead of 8 writes. michael@0: "vst1.32 {d4[0]}, [r9], %3 \n" michael@0: "vst1.32 {d4[1]}, [r9], %3 \n" michael@0: "vst1.32 {d5[0]}, [r9], %3 \n" michael@0: "vst1.32 {d5[1]}, [r9] \n" michael@0: michael@0: "add r9, %2, #4 \n" michael@0: "vst1.32 {d0[0]}, [r9], %3 \n" michael@0: "vst1.32 {d0[1]}, [r9], %3 \n" michael@0: "vst1.32 {d1[0]}, [r9], %3 \n" michael@0: "vst1.32 {d1[1]}, [r9] \n" michael@0: michael@0: "add %0, #4 \n" // src += 4 michael@0: "add %2, %2, %3, lsl #2 \n" // dst += 4 * dst_stride michael@0: "subs %4, #4 \n" // w -= 4 michael@0: "beq 4f \n" michael@0: michael@0: // some residual, check to see if it includes a 2x8 block, michael@0: // or less michael@0: "cmp %4, #2 \n" michael@0: "blt 3f \n" michael@0: michael@0: // 2x8 block michael@0: "2: \n" michael@0: "mov r9, %0 \n" michael@0: "vld1.16 {d0[0]}, [r9], %1 \n" michael@0: "vld1.16 {d1[0]}, [r9], %1 \n" michael@0: "vld1.16 {d0[1]}, [r9], %1 \n" michael@0: "vld1.16 {d1[1]}, [r9], %1 \n" michael@0: "vld1.16 {d0[2]}, [r9], %1 \n" michael@0: "vld1.16 {d1[2]}, [r9], %1 \n" michael@0: "vld1.16 {d0[3]}, [r9], %1 \n" michael@0: "vld1.16 {d1[3]}, [r9] \n" michael@0: michael@0: "vtrn.8 d0, d1 \n" michael@0: michael@0: "mov r9, %2 \n" michael@0: michael@0: "vst1.64 {d0}, [r9], %3 \n" michael@0: "vst1.64 {d1}, [r9] \n" michael@0: michael@0: "add %0, #2 \n" // src += 2 michael@0: "add %2, %2, %3, lsl #1 \n" // dst += 2 * dst_stride michael@0: "subs %4, #2 \n" // w -= 2 michael@0: "beq 4f \n" michael@0: michael@0: // 1x8 block michael@0: "3: \n" michael@0: "vld1.8 {d0[0]}, [%0], %1 \n" michael@0: "vld1.8 {d0[1]}, [%0], %1 \n" michael@0: "vld1.8 {d0[2]}, [%0], %1 \n" michael@0: "vld1.8 {d0[3]}, [%0], %1 \n" michael@0: "vld1.8 {d0[4]}, [%0], %1 \n" michael@0: "vld1.8 {d0[5]}, [%0], %1 \n" michael@0: "vld1.8 {d0[6]}, [%0], %1 \n" michael@0: "vld1.8 {d0[7]}, [%0] \n" michael@0: michael@0: "vst1.64 {d0}, [%2] \n" michael@0: michael@0: "4: \n" michael@0: michael@0: : "+r"(src), // %0 michael@0: "+r"(src_stride), // %1 michael@0: "+r"(dst), // %2 michael@0: "+r"(dst_stride), // %3 michael@0: "+r"(width) // %4 michael@0: : "r"(&kVTbl4x4Transpose) // %5 michael@0: : "memory", "cc", "r9", "q0", "q1", "q2", "q3" michael@0: ); michael@0: } michael@0: michael@0: static uvec8 kVTbl4x4TransposeDi = michael@0: { 0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15 }; michael@0: michael@0: void TransposeUVWx8_NEON(const uint8* src, int src_stride, michael@0: uint8* dst_a, int dst_stride_a, michael@0: uint8* dst_b, int dst_stride_b, michael@0: int width) { michael@0: asm volatile ( 
    // The loop works on blocks of 8 columns and stops when the counter
    // goes to or below 0. Starting the counter at w-8 allows for this.
    "sub         %6, #8                        \n"

    // Handle 8x8 blocks. This should be the majority of the plane.
    ".p2align  2                               \n"
    "1:                                        \n"
    "mov         r9, %0                        \n"

    "vld2.8      {d0,  d1},  [r9], %1          \n"
    "vld2.8      {d2,  d3},  [r9], %1          \n"
    "vld2.8      {d4,  d5},  [r9], %1          \n"
    "vld2.8      {d6,  d7},  [r9], %1          \n"
    "vld2.8      {d16, d17}, [r9], %1          \n"
    "vld2.8      {d18, d19}, [r9], %1          \n"
    "vld2.8      {d20, d21}, [r9], %1          \n"
    "vld2.8      {d22, d23}, [r9]              \n"

    "vtrn.8      q1, q0                        \n"
    "vtrn.8      q3, q2                        \n"
    "vtrn.8      q9, q8                        \n"
    "vtrn.8      q11, q10                      \n"

    "vtrn.16     q1, q3                        \n"
    "vtrn.16     q0, q2                        \n"
    "vtrn.16     q9, q11                       \n"
    "vtrn.16     q8, q10                       \n"

    "vtrn.32     q1, q9                        \n"
    "vtrn.32     q0, q8                        \n"
    "vtrn.32     q3, q11                       \n"
    "vtrn.32     q2, q10                       \n"

    "vrev16.8    q0, q0                        \n"
    "vrev16.8    q1, q1                        \n"
    "vrev16.8    q2, q2                        \n"
    "vrev16.8    q3, q3                        \n"
    "vrev16.8    q8, q8                        \n"
    "vrev16.8    q9, q9                        \n"
    "vrev16.8    q10, q10                      \n"
    "vrev16.8    q11, q11                      \n"

    "mov         r9, %2                        \n"

    "vst1.8      {d2},  [r9], %3               \n"
    "vst1.8      {d0},  [r9], %3               \n"
    "vst1.8      {d6},  [r9], %3               \n"
    "vst1.8      {d4},  [r9], %3               \n"
    "vst1.8      {d18}, [r9], %3               \n"
    "vst1.8      {d16}, [r9], %3               \n"
    "vst1.8      {d22}, [r9], %3               \n"
    "vst1.8      {d20}, [r9]                   \n"

    "mov         r9, %4                        \n"

    "vst1.8      {d3},  [r9], %5               \n"
    "vst1.8      {d1},  [r9], %5               \n"
    "vst1.8      {d7},  [r9], %5               \n"
    "vst1.8      {d5},  [r9], %5               \n"
    "vst1.8      {d19}, [r9], %5               \n"
    "vst1.8      {d17}, [r9], %5               \n"
    "vst1.8      {d23}, [r9], %5               \n"
    "vst1.8      {d21}, [r9]                   \n"

    "add         %0, #8*2                      \n"  // src   += 8*2
    "add         %2, %2, %3, lsl #3            \n"  // dst_a += 8 * dst_stride_a
    "add         %4, %4, %5, lsl #3            \n"  // dst_b += 8 * dst_stride_b
    "subs        %6, #8                        \n"  // w     -= 8
    "bge         1b                            \n"

    // Add 8 back to the counter. If the result is 0 there are
    // no residuals.
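    // Residuals are handled as in TransposeWx8_NEON, but on interleaved
    // byte pairs: a 4x8 block shuffled with kVTbl4x4TransposeDi, then a
    // 2x8 block, then a final 1x8 column.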
michael@0: "adds %6, #8 \n" michael@0: "beq 4f \n" michael@0: michael@0: // some residual, so between 1 and 7 lines left to transpose michael@0: "cmp %6, #2 \n" michael@0: "blt 3f \n" michael@0: michael@0: "cmp %6, #4 \n" michael@0: "blt 2f \n" michael@0: michael@0: //TODO(frkoenig): Clean this up michael@0: // 4x8 block michael@0: "mov r9, %0 \n" michael@0: "vld1.64 {d0}, [r9], %1 \n" michael@0: "vld1.64 {d1}, [r9], %1 \n" michael@0: "vld1.64 {d2}, [r9], %1 \n" michael@0: "vld1.64 {d3}, [r9], %1 \n" michael@0: "vld1.64 {d4}, [r9], %1 \n" michael@0: "vld1.64 {d5}, [r9], %1 \n" michael@0: "vld1.64 {d6}, [r9], %1 \n" michael@0: "vld1.64 {d7}, [r9] \n" michael@0: michael@0: "vld1.8 {q15}, [%7] \n" michael@0: michael@0: "vtrn.8 q0, q1 \n" michael@0: "vtrn.8 q2, q3 \n" michael@0: michael@0: "vtbl.8 d16, {d0, d1}, d30 \n" michael@0: "vtbl.8 d17, {d0, d1}, d31 \n" michael@0: "vtbl.8 d18, {d2, d3}, d30 \n" michael@0: "vtbl.8 d19, {d2, d3}, d31 \n" michael@0: "vtbl.8 d20, {d4, d5}, d30 \n" michael@0: "vtbl.8 d21, {d4, d5}, d31 \n" michael@0: "vtbl.8 d22, {d6, d7}, d30 \n" michael@0: "vtbl.8 d23, {d6, d7}, d31 \n" michael@0: michael@0: "mov r9, %2 \n" michael@0: michael@0: "vst1.32 {d16[0]}, [r9], %3 \n" michael@0: "vst1.32 {d16[1]}, [r9], %3 \n" michael@0: "vst1.32 {d17[0]}, [r9], %3 \n" michael@0: "vst1.32 {d17[1]}, [r9], %3 \n" michael@0: michael@0: "add r9, %2, #4 \n" michael@0: "vst1.32 {d20[0]}, [r9], %3 \n" michael@0: "vst1.32 {d20[1]}, [r9], %3 \n" michael@0: "vst1.32 {d21[0]}, [r9], %3 \n" michael@0: "vst1.32 {d21[1]}, [r9] \n" michael@0: michael@0: "mov r9, %4 \n" michael@0: michael@0: "vst1.32 {d18[0]}, [r9], %5 \n" michael@0: "vst1.32 {d18[1]}, [r9], %5 \n" michael@0: "vst1.32 {d19[0]}, [r9], %5 \n" michael@0: "vst1.32 {d19[1]}, [r9], %5 \n" michael@0: michael@0: "add r9, %4, #4 \n" michael@0: "vst1.32 {d22[0]}, [r9], %5 \n" michael@0: "vst1.32 {d22[1]}, [r9], %5 \n" michael@0: "vst1.32 {d23[0]}, [r9], %5 \n" michael@0: "vst1.32 {d23[1]}, [r9] \n" michael@0: michael@0: "add %0, #4*2 \n" // src += 4 * 2 michael@0: "add %2, %2, %3, lsl #2 \n" // dst_a += 4 * dst_stride_a michael@0: "add %4, %4, %5, lsl #2 \n" // dst_b += 4 * dst_stride_b michael@0: "subs %6, #4 \n" // w -= 4 michael@0: "beq 4f \n" michael@0: michael@0: // some residual, check to see if it includes a 2x8 block, michael@0: // or less michael@0: "cmp %6, #2 \n" michael@0: "blt 3f \n" michael@0: michael@0: // 2x8 block michael@0: "2: \n" michael@0: "mov r9, %0 \n" michael@0: "vld2.16 {d0[0], d2[0]}, [r9], %1 \n" michael@0: "vld2.16 {d1[0], d3[0]}, [r9], %1 \n" michael@0: "vld2.16 {d0[1], d2[1]}, [r9], %1 \n" michael@0: "vld2.16 {d1[1], d3[1]}, [r9], %1 \n" michael@0: "vld2.16 {d0[2], d2[2]}, [r9], %1 \n" michael@0: "vld2.16 {d1[2], d3[2]}, [r9], %1 \n" michael@0: "vld2.16 {d0[3], d2[3]}, [r9], %1 \n" michael@0: "vld2.16 {d1[3], d3[3]}, [r9] \n" michael@0: michael@0: "vtrn.8 d0, d1 \n" michael@0: "vtrn.8 d2, d3 \n" michael@0: michael@0: "mov r9, %2 \n" michael@0: michael@0: "vst1.64 {d0}, [r9], %3 \n" michael@0: "vst1.64 {d2}, [r9] \n" michael@0: michael@0: "mov r9, %4 \n" michael@0: michael@0: "vst1.64 {d1}, [r9], %5 \n" michael@0: "vst1.64 {d3}, [r9] \n" michael@0: michael@0: "add %0, #2*2 \n" // src += 2 * 2 michael@0: "add %2, %2, %3, lsl #1 \n" // dst_a += 2 * dst_stride_a michael@0: "add %4, %4, %5, lsl #1 \n" // dst_b += 2 * dst_stride_b michael@0: "subs %6, #2 \n" // w -= 2 michael@0: "beq 4f \n" michael@0: michael@0: // 1x8 block michael@0: "3: \n" michael@0: "vld2.8 {d0[0], d1[0]}, [%0], %1 \n" michael@0: "vld2.8 
    "vld2.8      {d0[0], d1[0]}, [%0], %1      \n"
    "vld2.8      {d0[1], d1[1]}, [%0], %1      \n"
    "vld2.8      {d0[2], d1[2]}, [%0], %1      \n"
    "vld2.8      {d0[3], d1[3]}, [%0], %1      \n"
    "vld2.8      {d0[4], d1[4]}, [%0], %1      \n"
    "vld2.8      {d0[5], d1[5]}, [%0], %1      \n"
    "vld2.8      {d0[6], d1[6]}, [%0], %1      \n"
    "vld2.8      {d0[7], d1[7]}, [%0]          \n"

    "vst1.64     {d0}, [%2]                    \n"
    "vst1.64     {d1}, [%4]                    \n"

    "4:                                        \n"

    : "+r"(src),                 // %0
      "+r"(src_stride),          // %1
      "+r"(dst_a),               // %2
      "+r"(dst_stride_a),        // %3
      "+r"(dst_b),               // %4
      "+r"(dst_stride_b),        // %5
      "+r"(width)                // %6
    : "r"(&kVTbl4x4TransposeDi)  // %7
    : "memory", "cc", "r9",
      "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
  );
}
#endif

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif