;
;  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS.  All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;


    ; These functions are only valid when:
    ; x_step_q4 == 16
    ; w%4 == 0
    ; h%4 == 0
    ; taps == 8
    ; VP9_FILTER_WEIGHT == 128
    ; VP9_FILTER_SHIFT == 7

    EXPORT  |vp9_convolve8_horiz_neon|
    EXPORT  |vp9_convolve8_vert_neon|
    IMPORT  |vp9_convolve8_horiz_c|
    IMPORT  |vp9_convolve8_vert_c|
    ARM
    REQUIRE8
    PRESERVE8

    AREA ||.text||, CODE, READONLY, ALIGN=2

    ; Multiply and accumulate by q0
    ; $dst (q register) accumulates $src0..$src7 (d registers of s16 lanes)
    ; weighted by the eight filter taps held in d0/d1 (loaded from filter_x/y).
    MACRO
    MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7
    vmull.s16 $dst, $src0, d0[0]
    vmlal.s16 $dst, $src1, d0[1]
    vmlal.s16 $dst, $src2, d0[2]
    vmlal.s16 $dst, $src3, d0[3]
    vmlal.s16 $dst, $src4, d1[0]
    vmlal.s16 $dst, $src5, d1[1]
    vmlal.s16 $dst, $src6, d1[2]
    vmlal.s16 $dst, $src7, d1[3]
    MEND

; r0    const uint8_t *src
; r1    int src_stride
; r2    uint8_t *dst
; r3    int dst_stride
; sp[]const int16_t *filter_x
; sp[]int x_step_q4
; sp[]const int16_t *filter_y  ; unused
; sp[]int y_step_q4            ; unused
; sp[]int w
; sp[]int h

|vp9_convolve8_horiz_neon| PROC
    ldr             r12, [sp, #4]           ; x_step_q4
    cmp             r12, #16
    bne             vp9_convolve8_horiz_c   ; fall back for fractional steps

    push            {r4-r10, lr}

    sub             r0, r0, #3              ; adjust for taps

    ldr             r5, [sp, #32]           ; filter_x
    ldr             r6, [sp, #48]           ; w
    ldr             r7, [sp, #52]           ; h

    vld1.s16        {q0}, [r5]              ; filter_x

    sub             r8, r1, r1, lsl #2      ; -src_stride * 3
    add             r8, r8, #4              ; -src_stride * 3 + 4

    sub             r4, r3, r3, lsl #2      ; -dst_stride * 3
    add             r4, r4, #4              ; -dst_stride * 3 + 4

    rsb             r9, r6, r1, lsl #2      ; reset src for outer loop
    sub             r9, r9, #7
    rsb             r12, r6, r3, lsl #2     ; reset dst for outer loop

    mov             r10, r6                 ; w loop counter

loop_horiz_v
    vld1.8          {d24}, [r0], r1
    vld1.8          {d25}, [r0], r1
    vld1.8          {d26}, [r0], r1
    vld1.8          {d27}, [r0], r8

    vtrn.16         q12, q13
    vtrn.8          d24, d25
    vtrn.8          d26, d27

    pld             [r0, r1, lsl #2]

    vmovl.u8        q8, d24
    vmovl.u8        q9, d25
    vmovl.u8        q10, d26
    vmovl.u8        q11, d27

    ; save a few instructions in the inner loop
    vswp            d17, d18
    vmov            d23, d21

    add             r0, r0, #3

loop_horiz
    add             r5, r0, #64

    vld1.32         {d28[]}, [r0], r1
    vld1.32         {d29[]}, [r0], r1
    vld1.32         {d31[]}, [r0], r1
    vld1.32         {d30[]}, [r0], r8

    pld             [r5]

    vtrn.16         d28, d31
    vtrn.16         d29, d30
    vtrn.8          d28, d29
    vtrn.8          d31, d30

    pld             [r5, r1]

    ; extract to s16
    vtrn.32         q14, q15
    vmovl.u8        q12, d28
    vmovl.u8        q13, d29

    pld             [r5, r1, lsl #1]

    ; src[] * filter_x
    MULTIPLY_BY_Q0  q1, d16, d17, d20, d22, d18, d19, d23, d24
    MULTIPLY_BY_Q0  q2, d17, d20, d22, d18, d19, d23, d24, d26
    MULTIPLY_BY_Q0  q14, d20, d22, d18, d19, d23, d24, d26, d27
    MULTIPLY_BY_Q0  q15, d22, d18, d19, d23, d24, d26, d27, d25

    pld             [r5, -r8]

    ; += 64 >> 7
    vqrshrun.s32    d2, q1, #7
    vqrshrun.s32    d3, q2, #7
    vqrshrun.s32    d4, q14, #7
    vqrshrun.s32    d5, q15, #7

    ; saturate
    vqmovn.u16      d2, q1
    vqmovn.u16      d3, q2

    ; transpose
    vtrn.16         d2, d3
    vtrn.32         d2, d3
    vtrn.8          d2, d3

    vst1.u32        {d2[0]}, [r2@32], r3
    vst1.u32        {d3[0]}, [r2@32], r3
    vst1.u32        {d2[1]}, [r2@32], r3
    vst1.u32        {d3[1]}, [r2@32], r4

    vmov            q8, q9
    vmov            d20, d23
    vmov            q11, q12
    vmov            q9, q13

    subs            r6, r6, #4              ; w -= 4
    bgt             loop_horiz

    ; outer loop
    mov             r6, r10                 ; restore w counter
    add             r0, r0, r9              ; src += src_stride * 4 - w
    add             r2, r2, r12             ; dst += dst_stride * 4 - w
    subs            r7, r7, #4              ; h -= 4
    bgt             loop_horiz_v

    pop             {r4-r10, pc}

    ENDP

|vp9_convolve8_vert_neon| PROC
    ldr             r12, [sp, #12]
    cmp             r12, #16
    bne             vp9_convolve8_vert_c    ; fall back for fractional steps

    push            {r4-r8, lr}

    ; adjust for taps
    sub             r0, r0, r1
    sub             r0, r0, r1, lsl #1

    ldr             r4, [sp, #32]           ; filter_y
    ldr             r6, [sp, #40]           ; w
    ldr             lr, [sp, #44]           ; h

    vld1.s16        {q0}, [r4]              ; filter_y

    lsl             r1, r1, #1
    lsl             r3, r3, #1

loop_vert_h
    mov             r4, r0
    add             r7, r0, r1, asr #1
    mov             r5, r2
    add             r8, r2, r3, asr #1
    mov             r12, lr                 ; h loop counter

    vld1.u32        {d16[0]}, [r4], r1
    vld1.u32        {d16[1]}, [r7], r1
    vld1.u32        {d18[0]}, [r4], r1
    vld1.u32        {d18[1]}, [r7], r1
    vld1.u32        {d20[0]}, [r4], r1
    vld1.u32        {d20[1]}, [r7], r1
    vld1.u32        {d22[0]}, [r4], r1

    vmovl.u8        q8, d16
    vmovl.u8        q9, d18
    vmovl.u8        q10, d20
    vmovl.u8        q11, d22

loop_vert
    ; always process a 4x4 block at a time
    vld1.u32        {d24[0]}, [r7], r1
    vld1.u32        {d26[0]}, [r4], r1
    vld1.u32        {d26[1]}, [r7], r1
    vld1.u32        {d24[1]}, [r4], r1

    ; extract to s16
    vmovl.u8        q12, d24
    vmovl.u8        q13, d26

    pld             [r5]
    pld             [r8]

    ; src[] * filter_y
    MULTIPLY_BY_Q0  q1, d16, d17, d18, d19, d20, d21, d22, d24

    pld             [r5, r3]
    pld             [r8, r3]

    MULTIPLY_BY_Q0  q2, d17, d18, d19, d20, d21, d22, d24, d26

    pld             [r7]
    pld             [r4]

    MULTIPLY_BY_Q0  q14, d18, d19, d20, d21, d22, d24, d26, d27

    pld             [r7, r1]
    pld             [r4, r1]

    MULTIPLY_BY_Q0  q15, d19, d20, d21, d22, d24, d26, d27, d25

    ; += 64 >> 7
    vqrshrun.s32    d2, q1, #7
    vqrshrun.s32    d3, q2, #7
    vqrshrun.s32    d4, q14, #7
    vqrshrun.s32    d5, q15, #7

    ; saturate
    vqmovn.u16      d2, q1
    vqmovn.u16      d3, q2

    vst1.u32        {d2[0]}, [r5@32], r3
    vst1.u32        {d2[1]}, [r8@32], r3
    vst1.u32        {d3[0]}, [r5@32], r3
    vst1.u32        {d3[1]}, [r8@32], r3

    vmov            q8, q10
    vmov            d18, d22
    vmov            d19, d24
    vmov            q10, q13
    vmov            d22, d25

    subs            r12, r12, #4            ; h -= 4
    bgt             loop_vert

    ; outer loop
    add             r0, r0, #4
    add             r2, r2, #4
    subs            r6, r6, #4              ; w -= 4
    bgt             loop_vert_h

    pop             {r4-r8, pc}

    ENDP
    END