Thu, 15 Jan 2015 15:59:08 +0100
Implement a real Private Browsing Mode condition by changing the API/ABI;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
michael@0 | 1 | ; |
michael@0 | 2 | ; Copyright (c) 2013 The WebM project authors. All Rights Reserved. |
michael@0 | 3 | ; |
michael@0 | 4 | ; Use of this source code is governed by a BSD-style license |
michael@0 | 5 | ; that can be found in the LICENSE file in the root of the source |
michael@0 | 6 | ; tree. An additional intellectual property rights grant can be found |
michael@0 | 7 | ; in the file PATENTS. All contributing project authors may |
michael@0 | 8 | ; be found in the AUTHORS file in the root of the source tree. |
michael@0 | 9 | ; |
michael@0 | 10 | |
michael@0 | 11 | |
michael@0 | 12 | ; These functions are only valid when: |
michael@0 | 13 | ; x_step_q4 == 16 |
michael@0 | 14 | ; w%4 == 0 |
michael@0 | 15 | ; h%4 == 0 |
michael@0 | 16 | ; taps == 8 |
michael@0 | 17 | ; VP9_FILTER_WEIGHT == 128 |
michael@0 | 18 | ; VP9_FILTER_SHIFT == 7 |
michael@0 | 19 | |
michael@0 | 20 | EXPORT |vp9_convolve8_horiz_neon| |
michael@0 | 21 | EXPORT |vp9_convolve8_vert_neon| |
michael@0 | 22 | IMPORT |vp9_convolve8_horiz_c| |
michael@0 | 23 | IMPORT |vp9_convolve8_vert_c| |
michael@0 | 24 | ARM |
michael@0 | 25 | REQUIRE8 |
michael@0 | 26 | PRESERVE8 |
michael@0 | 27 | |
michael@0 | 28 | AREA ||.text||, CODE, READONLY, ALIGN=2 |
michael@0 | 29 | |
michael@0 | 30 | ; Multiply and accumulate by q0 |
; MULTIPLY_BY_Q0 $dst, $src0 .. $src7:
;   $dst (4 x s32) = sum of the eight products $srcN * tap[N].
; The eight s16 filter taps are preloaded into q0 by the callers
; (d0[0..3] = taps 0-3, d1[0..3] = taps 4-7).  vmull/vmlal widen each
; s16 x s16 product to s32, so the accumulation cannot overflow for
; 8-bit pixel inputs.
michael@0 | 31 | MACRO |
michael@0 | 32 | MULTIPLY_BY_Q0 $dst, $src0, $src1, $src2, $src3, $src4, $src5, $src6, $src7 |
michael@0 | 33 | vmull.s16 $dst, $src0, d0[0] |
michael@0 | 34 | vmlal.s16 $dst, $src1, d0[1] |
michael@0 | 35 | vmlal.s16 $dst, $src2, d0[2] |
michael@0 | 36 | vmlal.s16 $dst, $src3, d0[3] |
michael@0 | 37 | vmlal.s16 $dst, $src4, d1[0] |
michael@0 | 38 | vmlal.s16 $dst, $src5, d1[1] |
michael@0 | 39 | vmlal.s16 $dst, $src6, d1[2] |
michael@0 | 40 | vmlal.s16 $dst, $src7, d1[3] |
michael@0 | 41 | MEND |
michael@0 | 42 | |
michael@0 | 43 | ; r0 const uint8_t *src |
michael@0 | 44 | ; r1 int src_stride |
michael@0 | 45 | ; r2 uint8_t *dst |
michael@0 | 46 | ; r3 int dst_stride |
michael@0 | 47 | ; sp[]const int16_t *filter_x |
michael@0 | 48 | ; sp[]int x_step_q4 |
michael@0 | 49 | ; sp[]const int16_t *filter_y ; unused |
michael@0 | 50 | ; sp[]int y_step_q4 ; unused |
michael@0 | 51 | ; sp[]int w |
michael@0 | 52 | ; sp[]int h |
michael@0 | 53 | |
; Horizontal 8-tap convolution, processed in 4x4 output tiles.
; Only handles the x_step_q4 == 16 (no subpixel step) case; anything
; else tail-calls the C reference implementation.
michael@0 | 54 | |vp9_convolve8_horiz_neon| PROC |
; Check x_step_q4 (sp[4]) BEFORE pushing anything, so the fallback
; branch reaches vp9_convolve8_horiz_c with the stack arguments still
; at their original offsets.
michael@0 | 55 | ldr r12, [sp, #4] ; x_step_q4 |
michael@0 | 56 | cmp r12, #16 |
michael@0 | 57 | bne vp9_convolve8_horiz_c |
michael@0 | 58 | |
michael@0 | 59 | push {r4-r10, lr} |
michael@0 | 60 | |
michael@0 | 61 | sub r0, r0, #3 ; adjust for taps |
michael@0 | 62 | |
; push {r4-r10, lr} moved sp down by 32 bytes, so the original sp[0],
; sp[16] and sp[20] arguments are now at #32, #48 and #52.
michael@0 | 63 | ldr r5, [sp, #32] ; filter_x |
michael@0 | 64 | ldr r6, [sp, #48] ; w |
michael@0 | 65 | ldr r7, [sp, #52] ; h |
michael@0 | 66 | |
michael@0 | 67 | vld1.s16 {q0}, [r5] ; filter_x |
michael@0 | 68 | |
; r8/r4: post-increment applied on the 4th load/store of each tile to
; step back up 3 rows and right 4 pixels, landing on the next tile.
michael@0 | 69 | sub r8, r1, r1, lsl #2 ; -src_stride * 3 |
michael@0 | 70 | add r8, r8, #4 ; -src_stride * 3 + 4 |
michael@0 | 71 | |
michael@0 | 72 | sub r4, r3, r3, lsl #2 ; -dst_stride * 3 |
michael@0 | 73 | add r4, r4, #4 ; -dst_stride * 3 + 4 |
michael@0 | 74 | |
; r9/r12: stride*4 - w (minus the 7-pixel tap adjustment for src),
; added after the inner loop to move both pointers to the next band
; of 4 rows.
michael@0 | 75 | rsb r9, r6, r1, lsl #2 ; reset src for outer loop |
michael@0 | 76 | sub r9, r9, #7 |
michael@0 | 77 | rsb r12, r6, r3, lsl #2 ; reset dst for outer loop |
michael@0 | 78 | |
michael@0 | 79 | mov r10, r6 ; w loop counter |
michael@0 | 80 | |
; Outer loop: one iteration per band of 4 output rows.  Loads the
; first 8 columns of the band and transposes them so each d register
; holds one COLUMN of source pixels (the horizontal filter then reads
; across registers, like the vertical one does across rows).
michael@0 | 81 | loop_horiz_v |
michael@0 | 82 | vld1.8 {d24}, [r0], r1 |
michael@0 | 83 | vld1.8 {d25}, [r0], r1 |
michael@0 | 84 | vld1.8 {d26}, [r0], r1 |
michael@0 | 85 | vld1.8 {d27}, [r0], r8 |
michael@0 | 86 | |
michael@0 | 87 | vtrn.16 q12, q13 |
michael@0 | 88 | vtrn.8 d24, d25 |
michael@0 | 89 | vtrn.8 d26, d27 |
michael@0 | 90 | |
michael@0 | 91 | pld [r0, r1, lsl #2] |
michael@0 | 92 | |
michael@0 | 93 | vmovl.u8 q8, d24 |
michael@0 | 94 | vmovl.u8 q9, d25 |
michael@0 | 95 | vmovl.u8 q10, d26 |
michael@0 | 96 | vmovl.u8 q11, d27 |
michael@0 | 97 | |
michael@0 | 98 | ; save a few instructions in the inner loop |
michael@0 | 99 | vswp d17, d18 |
michael@0 | 100 | vmov d23, d21 |
michael@0 | 101 | |
michael@0 | 102 | add r0, r0, #3 |
michael@0 | 103 | |
; Inner loop: one 4x4 output tile per iteration.  Only 4 new source
; columns are loaded; the previous 7 columns are carried over in
; q8-q11/d23 via the vmov rotation at the bottom of the loop.
michael@0 | 104 | loop_horiz |
michael@0 | 105 | add r5, r0, #64 |
michael@0 | 106 | |
michael@0 | 107 | vld1.32 {d28[]}, [r0], r1 |
michael@0 | 108 | vld1.32 {d29[]}, [r0], r1 |
michael@0 | 109 | vld1.32 {d31[]}, [r0], r1 |
michael@0 | 110 | vld1.32 {d30[]}, [r0], r8 |
michael@0 | 111 | |
; pld of [r5 .. r5 + 3 strides] are spread between compute blocks to
; hide prefetch latency.
michael@0 | 112 | pld [r5] |
michael@0 | 113 | |
michael@0 | 114 | vtrn.16 d28, d31 |
michael@0 | 115 | vtrn.16 d29, d30 |
michael@0 | 116 | vtrn.8 d28, d29 |
michael@0 | 117 | vtrn.8 d31, d30 |
michael@0 | 118 | |
michael@0 | 119 | pld [r5, r1] |
michael@0 | 120 | |
michael@0 | 121 | ; extract to s16 |
michael@0 | 122 | vtrn.32 q14, q15 |
michael@0 | 123 | vmovl.u8 q12, d28 |
michael@0 | 124 | vmovl.u8 q13, d29 |
michael@0 | 125 | |
michael@0 | 126 | pld [r5, r1, lsl #1] |
michael@0 | 127 | |
michael@0 | 128 | ; src[] * filter_x |
michael@0 | 129 | MULTIPLY_BY_Q0 q1, d16, d17, d20, d22, d18, d19, d23, d24 |
michael@0 | 130 | MULTIPLY_BY_Q0 q2, d17, d20, d22, d18, d19, d23, d24, d26 |
michael@0 | 131 | MULTIPLY_BY_Q0 q14, d20, d22, d18, d19, d23, d24, d26, d27 |
michael@0 | 132 | MULTIPLY_BY_Q0 q15, d22, d18, d19, d23, d24, d26, d27, d25 |
michael@0 | 133 | |
michael@0 | 134 | pld [r5, -r8] |
michael@0 | 135 | |
michael@0 | 136 | ; += 64 >> 7 |
; vqrshrun rounds (adds 64), shifts right by VP9_FILTER_SHIFT (7) and
; saturates s32 -> u16 in a single instruction.
michael@0 | 137 | vqrshrun.s32 d2, q1, #7 |
michael@0 | 138 | vqrshrun.s32 d3, q2, #7 |
michael@0 | 139 | vqrshrun.s32 d4, q14, #7 |
michael@0 | 140 | vqrshrun.s32 d5, q15, #7 |
michael@0 | 141 | |
michael@0 | 142 | ; saturate |
michael@0 | 143 | vqmovn.u16 d2, q1 |
michael@0 | 144 | vqmovn.u16 d3, q2 |
michael@0 | 145 | |
; transpose the 4x4 result back from column-major to row-major before
; storing.
michael@0 | 146 | ; transpose |
michael@0 | 147 | vtrn.16 d2, d3 |
michael@0 | 148 | vtrn.32 d2, d3 |
michael@0 | 149 | vtrn.8 d2, d3 |
michael@0 | 150 | |
; @32: dst rows are asserted 32-bit aligned; last store uses r4 to hop
; to the next tile (-3 rows, +4 columns).
michael@0 | 151 | vst1.u32 {d2[0]}, [r2@32], r3 |
michael@0 | 152 | vst1.u32 {d3[0]}, [r2@32], r3 |
michael@0 | 153 | vst1.u32 {d2[1]}, [r2@32], r3 |
michael@0 | 154 | vst1.u32 {d3[1]}, [r2@32], r4 |
michael@0 | 155 | |
; rotate the sliding window of source columns for the next tile.
michael@0 | 156 | vmov q8, q9 |
michael@0 | 157 | vmov d20, d23 |
michael@0 | 158 | vmov q11, q12 |
michael@0 | 159 | vmov q9, q13 |
michael@0 | 160 | |
michael@0 | 161 | subs r6, r6, #4 ; w -= 4 |
michael@0 | 162 | bgt loop_horiz |
michael@0 | 163 | |
michael@0 | 164 | ; outer loop |
michael@0 | 165 | mov r6, r10 ; restore w counter |
michael@0 | 166 | add r0, r0, r9 ; src += src_stride * 4 - w |
michael@0 | 167 | add r2, r2, r12 ; dst += dst_stride * 4 - w |
michael@0 | 168 | subs r7, r7, #4 ; h -= 4 |
michael@0 | 169 | bgt loop_horiz_v |
michael@0 | 170 | |
michael@0 | 171 | pop {r4-r10, pc} |
michael@0 | 172 | |
michael@0 | 173 | ENDP |
michael@0 | 174 | |
; Vertical 8-tap convolution, processed in 4-wide column strips of 4x4
; tiles.  Same argument layout as the horizontal version (see the
; comment block above vp9_convolve8_horiz_neon); here filter_y and
; y_step_q4 are the live stack arguments.  Only handles
; y_step_q4 == 16; anything else tail-calls the C reference version.
michael@0 | 175 | |vp9_convolve8_vert_neon| PROC |
; y_step_q4 is at sp[12]; checked before the push so the fallback sees
; the untouched argument stack.
michael@0 | 176 | ldr r12, [sp, #12] |
michael@0 | 177 | cmp r12, #16 |
michael@0 | 178 | bne vp9_convolve8_vert_c |
michael@0 | 179 | |
michael@0 | 180 | push {r4-r8, lr} |
michael@0 | 181 | |
; back src up 3 rows so the first tap lands on src[-3 * src_stride].
michael@0 | 182 | ; adjust for taps |
michael@0 | 183 | sub r0, r0, r1 |
michael@0 | 184 | sub r0, r0, r1, lsl #1 |
michael@0 | 185 | |
; push {r4-r8, lr} moved sp down by 24 bytes, so original sp[8],
; sp[16] and sp[20] are now at #32, #40 and #44.
michael@0 | 186 | ldr r4, [sp, #32] ; filter_y |
michael@0 | 187 | ldr r6, [sp, #40] ; w |
michael@0 | 188 | ldr lr, [sp, #44] ; h |
michael@0 | 189 | |
michael@0 | 190 | vld1.s16 {q0}, [r4] ; filter_y |
michael@0 | 191 | |
; double both strides: rows are walked with two interleaved pointers
; (r4/r5 = even rows, r7/r8 = odd rows), each advancing 2 rows/load.
michael@0 | 192 | lsl r1, r1, #1 |
michael@0 | 193 | lsl r3, r3, #1 |
michael@0 | 194 | |
; Outer loop: one iteration per 4-pixel-wide column strip.
michael@0 | 195 | loop_vert_h |
michael@0 | 196 | mov r4, r0 |
michael@0 | 197 | add r7, r0, r1, asr #1 ; r7 = src + original stride |
michael@0 | 198 | mov r5, r2 |
michael@0 | 199 | add r8, r2, r3, asr #1 ; r8 = dst + original stride |
michael@0 | 200 | mov r12, lr ; h loop counter |
michael@0 | 201 | |
; prime the 7-row history window (rows 0-6) before the inner loop.
michael@0 | 202 | vld1.u32 {d16[0]}, [r4], r1 |
michael@0 | 203 | vld1.u32 {d16[1]}, [r7], r1 |
michael@0 | 204 | vld1.u32 {d18[0]}, [r4], r1 |
michael@0 | 205 | vld1.u32 {d18[1]}, [r7], r1 |
michael@0 | 206 | vld1.u32 {d20[0]}, [r4], r1 |
michael@0 | 207 | vld1.u32 {d20[1]}, [r7], r1 |
michael@0 | 208 | vld1.u32 {d22[0]}, [r4], r1 |
michael@0 | 209 | |
michael@0 | 210 | vmovl.u8 q8, d16 |
michael@0 | 211 | vmovl.u8 q9, d18 |
michael@0 | 212 | vmovl.u8 q10, d20 |
michael@0 | 213 | vmovl.u8 q11, d22 |
michael@0 | 214 | |
; Inner loop: loads 4 new rows, combines them with the 7 carried rows
; to produce a 4x4 output tile.  pld hints are interleaved between the
; four multiply-accumulate groups to hide prefetch latency.
michael@0 | 215 | loop_vert |
michael@0 | 216 | ; always process a 4x4 block at a time |
michael@0 | 217 | vld1.u32 {d24[0]}, [r7], r1 |
michael@0 | 218 | vld1.u32 {d26[0]}, [r4], r1 |
michael@0 | 219 | vld1.u32 {d26[1]}, [r7], r1 |
michael@0 | 220 | vld1.u32 {d24[1]}, [r4], r1 |
michael@0 | 221 | |
michael@0 | 222 | ; extract to s16 |
michael@0 | 223 | vmovl.u8 q12, d24 |
michael@0 | 224 | vmovl.u8 q13, d26 |
michael@0 | 225 | |
michael@0 | 226 | pld [r5] |
michael@0 | 227 | pld [r8] |
michael@0 | 228 | |
michael@0 | 229 | ; src[] * filter_y |
michael@0 | 230 | MULTIPLY_BY_Q0 q1, d16, d17, d18, d19, d20, d21, d22, d24 |
michael@0 | 231 | |
michael@0 | 232 | pld [r5, r3] |
michael@0 | 233 | pld [r8, r3] |
michael@0 | 234 | |
michael@0 | 235 | MULTIPLY_BY_Q0 q2, d17, d18, d19, d20, d21, d22, d24, d26 |
michael@0 | 236 | |
michael@0 | 237 | pld [r7] |
michael@0 | 238 | pld [r4] |
michael@0 | 239 | |
michael@0 | 240 | MULTIPLY_BY_Q0 q14, d18, d19, d20, d21, d22, d24, d26, d27 |
michael@0 | 241 | |
michael@0 | 242 | pld [r7, r1] |
michael@0 | 243 | pld [r4, r1] |
michael@0 | 244 | |
michael@0 | 245 | MULTIPLY_BY_Q0 q15, d19, d20, d21, d22, d24, d26, d27, d25 |
michael@0 | 246 | |
michael@0 | 247 | ; += 64 >> 7 |
; round, shift by VP9_FILTER_SHIFT (7) and saturate s32 -> u16.
michael@0 | 248 | vqrshrun.s32 d2, q1, #7 |
michael@0 | 249 | vqrshrun.s32 d3, q2, #7 |
michael@0 | 250 | vqrshrun.s32 d4, q14, #7 |
michael@0 | 251 | vqrshrun.s32 d5, q15, #7 |
michael@0 | 252 | |
michael@0 | 253 | ; saturate |
michael@0 | 254 | vqmovn.u16 d2, q1 |
michael@0 | 255 | vqmovn.u16 d3, q2 |
michael@0 | 256 | |
; store via the same even/odd dual-pointer scheme; @32 asserts 32-bit
; alignment of dst.
michael@0 | 257 | vst1.u32 {d2[0]}, [r5@32], r3 |
michael@0 | 258 | vst1.u32 {d2[1]}, [r8@32], r3 |
michael@0 | 259 | vst1.u32 {d3[0]}, [r5@32], r3 |
michael@0 | 260 | vst1.u32 {d3[1]}, [r8@32], r3 |
michael@0 | 261 | |
; slide the 7-row history window down by 4 rows for the next tile.
michael@0 | 262 | vmov q8, q10 |
michael@0 | 263 | vmov d18, d22 |
michael@0 | 264 | vmov d19, d24 |
michael@0 | 265 | vmov q10, q13 |
michael@0 | 266 | vmov d22, d25 |
michael@0 | 267 | |
michael@0 | 268 | subs r12, r12, #4 ; h -= 4 |
michael@0 | 269 | bgt loop_vert |
michael@0 | 270 | |
michael@0 | 271 | ; outer loop |
michael@0 | 272 | add r0, r0, #4 |
michael@0 | 273 | add r2, r2, #4 |
michael@0 | 274 | subs r6, r6, #4 ; w -= 4 |
michael@0 | 275 | bgt loop_vert_h |
michael@0 | 276 | |
michael@0 | 277 | pop {r4-r8, pc} |
michael@0 | 278 | |
michael@0 | 279 | ENDP |
michael@0 | 280 | END |