media/libvpx/vp9/common/arm/neon/vp9_convolve8_avg_neon.asm

Thu, 22 Jan 2015 13:21:57 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Thu, 22 Jan 2015 13:21:57 +0100
branch
TOR_BUG_9701
changeset 15
b8a032363ba2
permissions
-rw-r--r--

Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6

michael@0 1 ;
michael@0 2 ; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
michael@0 3 ;
michael@0 4 ; Use of this source code is governed by a BSD-style license
michael@0 5 ; that can be found in the LICENSE file in the root of the source
michael@0 6 ; tree. An additional intellectual property rights grant can be found
michael@0 7 ; in the file PATENTS. All contributing project authors may
michael@0 8 ; be found in the AUTHORS file in the root of the source tree.
michael@0 9 ;
michael@0 10
michael@0 11
michael@0 12 ; These functions are only valid when:
michael@0 13 ; x_step_q4 == 16
michael@0 14 ; w%4 == 0
michael@0 15 ; h%4 == 0
michael@0 16 ; taps == 8
michael@0 17 ; VP9_FILTER_WEIGHT == 128
michael@0 18 ; VP9_FILTER_SHIFT == 7
michael@0 19
michael@0 20 EXPORT |vp9_convolve8_avg_horiz_neon|
michael@0 21 EXPORT |vp9_convolve8_avg_vert_neon|
michael@0 22 IMPORT |vp9_convolve8_avg_horiz_c|
michael@0 23 IMPORT |vp9_convolve8_avg_vert_c|
michael@0 24 ARM
michael@0 25 REQUIRE8
michael@0 26 PRESERVE8
michael@0 27
michael@0 28 AREA ||.text||, CODE, READONLY, ALIGN=2
michael@0 29
michael@0 30 ; Multiply and accumulate by q0
michael@0 31 MACRO
michael@0 32 MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7
michael@0 33 vmull.s16 $dst, $src0, d0[0] ; $dst = $src0 * filter[0] (widening s16->s32)
michael@0 34 vmlal.s16 $dst, $src1, d0[1] ; $dst += $src1 * filter[1]
michael@0 35 vmlal.s16 $dst, $src2, d0[2] ; $dst += $src2 * filter[2]
michael@0 36 vmlal.s16 $dst, $src3, d0[3] ; $dst += $src3 * filter[3]
michael@0 37 vmlal.s16 $dst, $src4, d1[0] ; $dst += $src4 * filter[4]
michael@0 38 vmlal.s16 $dst, $src5, d1[1] ; $dst += $src5 * filter[5]
michael@0 39 vmlal.s16 $dst, $src6, d1[2] ; $dst += $src6 * filter[6]
michael@0 40 vmlal.s16 $dst, $src7, d1[3] ; $dst += $src7 * filter[7]
michael@0 41 MEND ; 8-tap dot product; q0 (d0/d1) holds the eight s16 filter taps
michael@0 42
michael@0 43 ; r0 const uint8_t *src
michael@0 44 ; r1 int src_stride
michael@0 45 ; r2 uint8_t *dst
michael@0 46 ; r3 int dst_stride
michael@0 47 ; sp[]const int16_t *filter_x
michael@0 48 ; sp[]int x_step_q4
michael@0 49 ; sp[]const int16_t *filter_y ; unused
michael@0 50 ; sp[]int y_step_q4 ; unused
michael@0 51 ; sp[]int w
michael@0 52 ; sp[]int h
michael@0 53
michael@0 54 |vp9_convolve8_avg_horiz_neon| PROC ; horizontal 8-tap convolve, result averaged with dst (args documented above)
michael@0 55 ldr r12, [sp, #4] ; x_step_q4
michael@0 56 cmp r12, #16
michael@0 57 bne vp9_convolve8_avg_horiz_c ; fall back to C when x_step_q4 != 16
michael@0 58
michael@0 59 push {r4-r10, lr} ; 8 regs = 32 bytes; stack args now at sp+32
michael@0 60
michael@0 61 sub r0, r0, #3 ; adjust for taps
michael@0 62
michael@0 63 ldr r5, [sp, #32] ; filter_x
michael@0 64 ldr r6, [sp, #48] ; w
michael@0 65 ldr r7, [sp, #52] ; h
michael@0 66
michael@0 67 vld1.s16 {q0}, [r5] ; filter_x
michael@0 68
michael@0 69 sub r8, r1, r1, lsl #2 ; -src_stride * 3
michael@0 70 add r8, r8, #4 ; -src_stride * 3 + 4
michael@0 71
michael@0 72 sub r4, r3, r3, lsl #2 ; -dst_stride * 3
michael@0 73 add r4, r4, #4 ; -dst_stride * 3 + 4
michael@0 74
michael@0 75 rsb r9, r6, r1, lsl #2 ; reset src for outer loop
michael@0 76 sub r9, r9, #7
michael@0 77 rsb r12, r6, r3, lsl #2 ; reset dst for outer loop
michael@0 78
michael@0 79 mov r10, r6 ; w loop counter
michael@0 80
michael@0 81 loop_horiz_v ; outer loop: one band of 4 output rows per iteration
michael@0 82 vld1.8 {d24}, [r0], r1 ; load 4 rows x 8 bytes of src
michael@0 83 vld1.8 {d25}, [r0], r1
michael@0 84 vld1.8 {d26}, [r0], r1
michael@0 85 vld1.8 {d27}, [r0], r8 ; last row; r8 rewinds 3 rows and steps +4 cols
michael@0 86
michael@0 87 vtrn.16 q12, q13 ; transpose the 4x8 byte block so pixels line up per tap
michael@0 88 vtrn.8 d24, d25
michael@0 89 vtrn.8 d26, d27
michael@0 90
michael@0 91 pld [r0, r1, lsl #2] ; prefetch 4 rows ahead
michael@0 92
michael@0 93 vmovl.u8 q8, d24 ; widen u8 -> 16-bit for the s16 MAC macro
michael@0 94 vmovl.u8 q9, d25
michael@0 95 vmovl.u8 q10, d26
michael@0 96 vmovl.u8 q11, d27
michael@0 97
michael@0 98 ; save a few instructions in the inner loop
michael@0 99 vswp d17, d18
michael@0 100 vmov d23, d21
michael@0 101
michael@0 102 add r0, r0, #3 ; NOTE(review): appears to undo the initial -3 tap offset for the inner-loop loads — confirm
michael@0 103
michael@0 104 loop_horiz ; inner loop: one 4x4 output block per iteration
michael@0 105 add r5, r0, #64 ; r5 = src + 64, base address for the pld prefetches below
michael@0 106
michael@0 107 vld1.32 {d28[]}, [r0], r1 ; load next 4x4 src block, one row per 32-bit lane
michael@0 108 vld1.32 {d29[]}, [r0], r1
michael@0 109 vld1.32 {d31[]}, [r0], r1
michael@0 110 vld1.32 {d30[]}, [r0], r8 ; r8: back up 3 rows, advance 4 cols
michael@0 111
michael@0 112 pld [r5]
michael@0 113
michael@0 114 vtrn.16 d28, d31 ; transpose the 4x4 block
michael@0 115 vtrn.16 d29, d30
michael@0 116 vtrn.8 d28, d29
michael@0 117 vtrn.8 d31, d30
michael@0 118
michael@0 119 pld [r5, r1]
michael@0 120
michael@0 121 ; extract to s16
michael@0 122 vtrn.32 q14, q15
michael@0 123 vmovl.u8 q12, d28
michael@0 124 vmovl.u8 q13, d29
michael@0 125
michael@0 126 pld [r5, r1, lsl #1]
michael@0 127
michael@0 128 ; slightly out of order load to match the existing data
michael@0 129 vld1.u32 {d6[0]}, [r2], r3 ; load 4 rows of dst (4 bytes each) for averaging
michael@0 130 vld1.u32 {d7[0]}, [r2], r3
michael@0 131 vld1.u32 {d6[1]}, [r2], r3
michael@0 132 vld1.u32 {d7[1]}, [r2], r3
michael@0 133
michael@0 134 sub r2, r2, r3, lsl #2 ; reset for store
michael@0 135
michael@0 136 ; src[] * filter_x
michael@0 137 MULTIPLY_BY_Q0 q1, d16, d17, d20, d22, d18, d19, d23, d24
michael@0 138 MULTIPLY_BY_Q0 q2, d17, d20, d22, d18, d19, d23, d24, d26
michael@0 139 MULTIPLY_BY_Q0 q14, d20, d22, d18, d19, d23, d24, d26, d27
michael@0 140 MULTIPLY_BY_Q0 q15, d22, d18, d19, d23, d24, d26, d27, d25
michael@0 141
michael@0 142 pld [r5, -r8] ; prefetch; -r8 = 3*src_stride - 4
michael@0 143
michael@0 144 ; += 64 >> 7
michael@0 145 vqrshrun.s32 d2, q1, #7 ; round, shift by FILTER_SHIFT, narrow to u16
michael@0 146 vqrshrun.s32 d3, q2, #7
michael@0 147 vqrshrun.s32 d4, q14, #7
michael@0 148 vqrshrun.s32 d5, q15, #7
michael@0 149
michael@0 150 ; saturate
michael@0 151 vqmovn.u16 d2, q1 ; narrow to u8 with saturation
michael@0 152 vqmovn.u16 d3, q2
michael@0 153
michael@0 154 ; transpose
michael@0 155 vtrn.16 d2, d3 ; back to row order for the store
michael@0 156 vtrn.32 d2, d3
michael@0 157 vtrn.8 d2, d3
michael@0 158
michael@0 159 ; average the new value and the dst value
michael@0 160 vrhadd.u8 q1, q1, q3 ; rounding halving add: (filtered + dst + 1) >> 1
michael@0 161
michael@0 162 vst1.u32 {d2[0]}, [r2@32], r3 ; store 4x4 output (@32 = 32-bit aligned)
michael@0 163 vst1.u32 {d3[0]}, [r2@32], r3
michael@0 164 vst1.u32 {d2[1]}, [r2@32], r3
michael@0 165 vst1.u32 {d3[1]}, [r2@32], r4 ; r4: back up 3 rows, advance 4 cols
michael@0 166
michael@0 167 vmov q8, q9 ; slide the source window 4 pixels right for the next block
michael@0 168 vmov d20, d23
michael@0 169 vmov q11, q12
michael@0 170 vmov q9, q13
michael@0 171
michael@0 172 subs r6, r6, #4 ; w -= 4
michael@0 173 bgt loop_horiz
michael@0 174
michael@0 175 ; outer loop
michael@0 176 mov r6, r10 ; restore w counter
michael@0 177 add r0, r0, r9 ; src += src_stride * 4 - w
michael@0 178 add r2, r2, r12 ; dst += dst_stride * 4 - w
michael@0 179 subs r7, r7, #4 ; h -= 4
michael@0 180 bgt loop_horiz_v
michael@0 181
michael@0 182 pop {r4-r10, pc}
michael@0 183
michael@0 184 ENDP
michael@0 185
michael@0 186 |vp9_convolve8_avg_vert_neon| PROC ; vertical 8-tap convolve, result averaged with dst
michael@0 187 ldr r12, [sp, #12] ; y_step_q4
michael@0 188 cmp r12, #16
michael@0 189 bne vp9_convolve8_avg_vert_c ; fall back to C when y_step_q4 != 16
michael@0 190
michael@0 191 push {r4-r8, lr} ; 6 regs = 24 bytes; stack args now at sp+24
michael@0 192
michael@0 193 ; adjust for taps
michael@0 194 sub r0, r0, r1 ; src -= 3 * src_stride
michael@0 195 sub r0, r0, r1, lsl #1
michael@0 196
michael@0 197 ldr r4, [sp, #32] ; filter_y
michael@0 198 ldr r6, [sp, #40] ; w
michael@0 199 ldr lr, [sp, #44] ; h
michael@0 200
michael@0 201 vld1.s16 {q0}, [r4] ; filter_y
michael@0 202
michael@0 203 lsl r1, r1, #1 ; double strides: two interleaved row pointers (r4/r7,
michael@0 204 lsl r3, r3, #1 ; r5/r8 below) each step by 2 rows at a time
michael@0 205
michael@0 206 loop_vert_h ; outer loop: one 4-pixel-wide column strip per iteration
michael@0 207 mov r4, r0 ; even-row src pointer
michael@0 208 add r7, r0, r1, asr #1 ; odd-row src pointer (asr #1 = original stride)
michael@0 209 mov r5, r2 ; even-row dst pointer
michael@0 210 add r8, r2, r3, asr #1 ; odd-row dst pointer
michael@0 211 mov r12, lr ; h loop counter
michael@0 212
michael@0 213 vld1.u32 {d16[0]}, [r4], r1 ; prime the filter with the first 7 rows of 4 pixels
michael@0 214 vld1.u32 {d16[1]}, [r7], r1
michael@0 215 vld1.u32 {d18[0]}, [r4], r1
michael@0 216 vld1.u32 {d18[1]}, [r7], r1
michael@0 217 vld1.u32 {d20[0]}, [r4], r1
michael@0 218 vld1.u32 {d20[1]}, [r7], r1
michael@0 219 vld1.u32 {d22[0]}, [r4], r1
michael@0 220
michael@0 221 vmovl.u8 q8, d16 ; widen u8 -> 16-bit for the s16 MAC macro
michael@0 222 vmovl.u8 q9, d18
michael@0 223 vmovl.u8 q10, d20
michael@0 224 vmovl.u8 q11, d22
michael@0 225
michael@0 226 loop_vert
michael@0 227 ; always process a 4x4 block at a time
michael@0 228 vld1.u32 {d24[0]}, [r7], r1 ; load the next 4 src rows (lane order matches
michael@0 229 vld1.u32 {d26[0]}, [r4], r1 ; the register layout expected by the MACs)
michael@0 230 vld1.u32 {d26[1]}, [r7], r1
michael@0 231 vld1.u32 {d24[1]}, [r4], r1
michael@0 232
michael@0 233 ; extract to s16
michael@0 234 vmovl.u8 q12, d24
michael@0 235 vmovl.u8 q13, d26
michael@0 236
michael@0 237 vld1.u32 {d6[0]}, [r5@32], r3 ; load 4 rows of dst for averaging
michael@0 238 vld1.u32 {d6[1]}, [r8@32], r3
michael@0 239 vld1.u32 {d7[0]}, [r5@32], r3
michael@0 240 vld1.u32 {d7[1]}, [r8@32], r3
michael@0 241
michael@0 242 pld [r7] ; prefetches interleaved with the MACs below to hide latency
michael@0 243 pld [r4]
michael@0 244
michael@0 245 ; src[] * filter_y
michael@0 246 MULTIPLY_BY_Q0 q1, d16, d17, d18, d19, d20, d21, d22, d24
michael@0 247
michael@0 248 pld [r7, r1]
michael@0 249 pld [r4, r1]
michael@0 250
michael@0 251 MULTIPLY_BY_Q0 q2, d17, d18, d19, d20, d21, d22, d24, d26
michael@0 252
michael@0 253 pld [r5]
michael@0 254 pld [r8]
michael@0 255
michael@0 256 MULTIPLY_BY_Q0 q14, d18, d19, d20, d21, d22, d24, d26, d27
michael@0 257
michael@0 258 pld [r5, r3]
michael@0 259 pld [r8, r3]
michael@0 260
michael@0 261 MULTIPLY_BY_Q0 q15, d19, d20, d21, d22, d24, d26, d27, d25
michael@0 262
michael@0 263 ; += 64 >> 7
michael@0 264 vqrshrun.s32 d2, q1, #7 ; round, shift by FILTER_SHIFT, narrow to u16
michael@0 265 vqrshrun.s32 d3, q2, #7
michael@0 266 vqrshrun.s32 d4, q14, #7
michael@0 267 vqrshrun.s32 d5, q15, #7
michael@0 268
michael@0 269 ; saturate
michael@0 270 vqmovn.u16 d2, q1 ; narrow to u8 with saturation
michael@0 271 vqmovn.u16 d3, q2
michael@0 272
michael@0 273 ; average the new value and the dst value
michael@0 274 vrhadd.u8 q1, q1, q3 ; rounding halving add: (filtered + dst + 1) >> 1
michael@0 275
michael@0 276 sub r5, r5, r3, lsl #1 ; reset for store
michael@0 277 sub r8, r8, r3, lsl #1
michael@0 278
michael@0 279 vst1.u32 {d2[0]}, [r5@32], r3 ; store 4x4 output (@32 = 32-bit aligned)
michael@0 280 vst1.u32 {d2[1]}, [r8@32], r3
michael@0 281 vst1.u32 {d3[0]}, [r5@32], r3
michael@0 282 vst1.u32 {d3[1]}, [r8@32], r3
michael@0 283
michael@0 284 vmov q8, q10 ; slide the 7-row source window down 4 rows
michael@0 285 vmov d18, d22
michael@0 286 vmov d19, d24
michael@0 287 vmov q10, q13
michael@0 288 vmov d22, d25
michael@0 289
michael@0 290 subs r12, r12, #4 ; h -= 4
michael@0 291 bgt loop_vert
michael@0 292
michael@0 293 ; outer loop
michael@0 294 add r0, r0, #4 ; next 4-pixel column strip
michael@0 295 add r2, r2, #4
michael@0 296 subs r6, r6, #4 ; w -= 4
michael@0 297 bgt loop_vert_h
michael@0 298
michael@0 299 pop {r4-r8, pc}
michael@0 300
michael@0 301 ENDP
michael@0 302 END

mercurial