Thu, 15 Jan 2015 15:59:08 +0100
Implement a real Private Browsing Mode condition by changing the API/ABI;
this solves Tor bug #9701, complying with the disk-avoidance requirement documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
;
;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
;  Use of this source code is governed by a BSD-style license
;  that can be found in the LICENSE file in the root of the source
;  tree. An additional intellectual property rights grant can be found
;  in the file PATENTS. All contributing project authors may
;  be found in the AUTHORS file in the root of the source tree.
;

    EXPORT |vp8_short_walsh4x4_armv6|

    ARM
    REQUIRE8
    PRESERVE8

    AREA    |.text|, CODE, READONLY     ; name this block of code

;short vp8_short_walsh4x4_armv6(short *input, short *output, int pitch)
; r0    short *input,
; r1    short *output,
; r2    int pitch
|vp8_short_walsh4x4_armv6| PROC

    stmdb       sp!, {r4 - r11, lr}

    ldrd        r4, r5, [r0], r2
    ldr         lr, c00040004
    ldrd        r6, r7, [r0], r2

    ; 0-3
    qadd16      r3, r4, r5              ; [d1|a1] [1+3   |   0+2]
    qsub16      r4, r4, r5              ; [c1|b1] [1-3   |   0-2]

    ldrd        r8, r9, [r0], r2
    ; 4-7
    qadd16      r5, r6, r7              ; [d1|a1] [5+7   |   4+6]
    qsub16      r6, r6, r7              ; [c1|b1] [5-7   |   4-6]

    ldrd        r10, r11, [r0]
    ; 8-11
    qadd16      r7, r8, r9              ; [d1|a1] [9+11  |  8+10]
    qsub16      r8, r8, r9              ; [c1|b1] [9-11  |  8-10]

    ; 12-15
    qadd16      r9, r10, r11            ; [d1|a1] [13+15 | 12+14]
    qsub16      r10, r10, r11           ; [c1|b1] [13-15 | 12-14]


    lsls        r2, r3, #16
    smuad       r11, r3, lr             ; A0 = a1<<2 + d1<<2
    addne       r11, r11, #1            ; A0 += (a1!=0)

    lsls        r2, r7, #16
    smuad       r12, r7, lr             ; C0 = a1<<2 + d1<<2
    addne       r12, r12, #1            ; C0 += (a1!=0)

    add         r0, r11, r12            ; a1_0 = A0 + C0
    sub         r11, r11, r12           ; b1_0 = A0 - C0

    lsls        r2, r5, #16
    smuad       r12, r5, lr             ; B0 = a1<<2 + d1<<2
    addne       r12, r12, #1            ; B0 += (a1!=0)

    lsls        r2, r9, #16
    smuad       r2, r9, lr              ; D0 = a1<<2 + d1<<2
    addne       r2, r2, #1              ; D0 += (a1!=0)

    add         lr, r12, r2             ; d1_0 = B0 + D0
    sub         r12, r12, r2            ; c1_0 = B0 - D0

    ; op[0,4,8,12]
    adds        r2, r0, lr              ; a2 = a1_0 + d1_0
    addmi       r2, r2, #1              ; += a2 < 0
    add         r2, r2, #3              ; += 3
    subs        r0, r0, lr              ; d2 = a1_0 - d1_0
    mov         r2, r2, asr #3          ; >> 3
    strh        r2, [r1]                ; op[0]

    addmi       r0, r0, #1              ; += a2 < 0
    add         r0, r0, #3              ; += 3
    ldr         lr, c00040004
    mov         r0, r0, asr #3          ; >> 3
    strh        r0, [r1, #24]           ; op[12]

    adds        r2, r11, r12            ; b2 = b1_0 + c1_0
    addmi       r2, r2, #1              ; += a2 < 0
    add         r2, r2, #3              ; += 3
    subs        r0, r11, r12            ; c2 = b1_0 - c1_0
    mov         r2, r2, asr #3          ; >> 3
    strh        r2, [r1, #8]            ; op[4]

    addmi       r0, r0, #1              ; += a2 < 0
    add         r0, r0, #3              ; += 3
    smusd       r3, r3, lr              ; A3 = a1<<2 - d1<<2
    smusd       r7, r7, lr              ; C3 = a1<<2 - d1<<2
    mov         r0, r0, asr #3          ; >> 3
    strh        r0, [r1, #16]           ; op[8]


    ; op[3,7,11,15]
    add         r0, r3, r7              ; a1_3 = A3 + C3
    sub         r3, r3, r7              ; b1_3 = A3 - C3

    smusd       r5, r5, lr              ; B3 = a1<<2 - d1<<2
    smusd       r9, r9, lr              ; D3 = a1<<2 - d1<<2
    add         r7, r5, r9              ; d1_3 = B3 + D3
    sub         r5, r5, r9              ; c1_3 = B3 - D3

    adds        r2, r0, r7              ; a2 = a1_3 + d1_3
    addmi       r2, r2, #1              ; += a2 < 0
    add         r2, r2, #3              ; += 3
    adds        r9, r3, r5              ; b2 = b1_3 + c1_3
    mov         r2, r2, asr #3          ; >> 3
    strh        r2, [r1, #6]            ; op[3]

    addmi       r9, r9, #1              ; += a2 < 0
    add         r9, r9, #3              ; += 3
    subs        r2, r3, r5              ; c2 = b1_3 - c1_3
    mov         r9, r9, asr #3          ; >> 3
    strh        r9, [r1, #14]           ; op[7]

    addmi       r2, r2, #1              ; += a2 < 0
    add         r2, r2, #3              ; += 3
    subs        r9, r0, r7              ; d2 = a1_3 - d1_3
    mov         r2, r2, asr #3          ; >> 3
    strh        r2, [r1, #22]           ; op[11]

    addmi       r9, r9, #1              ; += a2 < 0
    add         r9, r9, #3              ; += 3
    smuad       r3, r4, lr              ; A1 = b1<<2 + c1<<2
    smuad       r5, r8, lr              ; C1 = b1<<2 + c1<<2
    mov         r9, r9, asr #3          ; >> 3
    strh        r9, [r1, #30]           ; op[15]

    ; op[1,5,9,13]
    add         r0, r3, r5              ; a1_1 = A1 + C1
    sub         r3, r3, r5              ; b1_1 = A1 - C1

    smuad       r7, r6, lr              ; B1 = b1<<2 + c1<<2
    smuad       r9, r10, lr             ; D1 = b1<<2 + c1<<2
    add         r5, r7, r9              ; d1_1 = B1 + D1
    sub         r7, r7, r9              ; c1_1 = B1 - D1

    adds        r2, r0, r5              ; a2 = a1_1 + d1_1
    addmi       r2, r2, #1              ; += a2 < 0
    add         r2, r2, #3              ; += 3
    adds        r9, r3, r7              ; b2 = b1_1 + c1_1
    mov         r2, r2, asr #3          ; >> 3
    strh        r2, [r1, #2]            ; op[1]

    addmi       r9, r9, #1              ; += a2 < 0
    add         r9, r9, #3              ; += 3
    subs        r2, r3, r7              ; c2 = b1_1 - c1_1
    mov         r9, r9, asr #3          ; >> 3
    strh        r9, [r1, #10]           ; op[5]

    addmi       r2, r2, #1              ; += a2 < 0
    add         r2, r2, #3              ; += 3
    subs        r9, r0, r5              ; d2 = a1_1 - d1_1
    mov         r2, r2, asr #3          ; >> 3
    strh        r2, [r1, #18]           ; op[9]

    addmi       r9, r9, #1              ; += a2 < 0
    add         r9, r9, #3              ; += 3
    smusd       r4, r4, lr              ; A2 = b1<<2 - c1<<2
    smusd       r8, r8, lr              ; C2 = b1<<2 - c1<<2
    mov         r9, r9, asr #3          ; >> 3
    strh        r9, [r1, #26]           ; op[13]


    ; op[2,6,10,14]
    add         r11, r4, r8             ; a1_2 = A2 + C2
    sub         r12, r4, r8             ; b1_2 = A2 - C2

    smusd       r6, r6, lr              ; B2 = b1<<2 - c1<<2
    smusd       r10, r10, lr            ; D2 = b1<<2 - c1<<2
    add         r4, r6, r10             ; d1_2 = B2 + D2
    sub         r8, r6, r10             ; c1_2 = B2 - D2

    adds        r2, r11, r4             ; a2 = a1_2 + d1_2
    addmi       r2, r2, #1              ; += a2 < 0
    add         r2, r2, #3              ; += 3
    adds        r9, r12, r8             ; b2 = b1_2 + c1_2
    mov         r2, r2, asr #3          ; >> 3
    strh        r2, [r1, #4]            ; op[2]

    addmi       r9, r9, #1              ; += a2 < 0
    add         r9, r9, #3              ; += 3
    subs        r2, r12, r8             ; c2 = b1_2 - c1_2
    mov         r9, r9, asr #3          ; >> 3
    strh        r9, [r1, #12]           ; op[6]

    addmi       r2, r2, #1              ; += a2 < 0
    add         r2, r2, #3              ; += 3
    subs        r9, r11, r4             ; d2 = a1_2 - d1_2
    mov         r2, r2, asr #3          ; >> 3
    strh        r2, [r1, #20]           ; op[10]

    addmi       r9, r9, #1              ; += a2 < 0
    add         r9, r9, #3              ; += 3
    mov         r9, r9, asr #3          ; >> 3
    strh        r9, [r1, #28]           ; op[14]


    ldmia       sp!, {r4 - r11, pc}
    ENDP                                ; |vp8_short_walsh4x4_armv6|

c00040004
    DCD         0x00040004

    END
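For orientation, below is a plain-C sketch of the 4x4 Walsh-Hadamard transform that the
assembly above implements, reconstructed from the register comments (A0..D3, a1_x/b1_x/c1_x/d1_x,
the packed constant 0x00040004 used by SMUAD/SMUSD as a multiply-by-4, and the
"+= (x<0); += 3; >> 3" rounding). It is an illustrative model, not the upstream
vp8_short_walsh4x4_c reference: the function name walsh4x4_model is made up here, pitch is
assumed to be the byte stride between input rows (matching the "ldrd ..., [r0], r2"
post-increment), and the model uses plain integer arithmetic, so it is not bit-exact for
inputs that would saturate the 16-bit QADD16/QSUB16 operations.

#include <stdint.h>

/* Illustrative C model of vp8_short_walsh4x4_armv6 (a sketch, not the upstream code).
 * input: 4x4 block of 16-bit samples, rows separated by `pitch` bytes;
 * output: 16 coefficients, row-major. */
static void walsh4x4_model(const int16_t *input, int16_t *output, int pitch)
{
    int32_t row[4][4];

    /* Horizontal pass: in the assembly this is QADD16/QSUB16 followed by
     * SMUAD/SMUSD against 0x00040004 (scale by 4), plus the LSLS/ADDNE
     * "+= (a1 != 0)" correction on the DC term of each row. */
    for (int i = 0; i < 4; i++) {
        const int16_t *ip = (const int16_t *)((const uint8_t *)input + i * pitch);
        int a1 = (ip[0] + ip[2]) * 4;
        int d1 = (ip[1] + ip[3]) * 4;
        int b1 = (ip[0] - ip[2]) * 4;
        int c1 = (ip[1] - ip[3]) * 4;

        row[i][0] = a1 + d1 + (a1 != 0);
        row[i][1] = b1 + c1;
        row[i][2] = b1 - c1;
        row[i][3] = a1 - d1;
    }

    /* Vertical pass, one column per iteration, with the assembly's rounding:
     * add 1 if negative, add 3, then arithmetic shift right by 3. */
    for (int j = 0; j < 4; j++) {
        int a1 = row[0][j] + row[2][j];     /* A + C (rows 0 and 2) */
        int d1 = row[1][j] + row[3][j];     /* B + D (rows 1 and 3) */
        int b1 = row[0][j] - row[2][j];     /* A - C */
        int c1 = row[1][j] - row[3][j];     /* B - D */

        int a2 = a1 + d1;
        int b2 = b1 + c1;
        int c2 = b1 - c1;
        int d2 = a1 - d1;

        output[j +  0] = (int16_t)((a2 + (a2 < 0) + 3) >> 3);   /* op[0..3]   */
        output[j +  4] = (int16_t)((b2 + (b2 < 0) + 3) >> 3);   /* op[4..7]   */
        output[j +  8] = (int16_t)((c2 + (c2 < 0) + 3) >> 3);   /* op[8..11]  */
        output[j + 12] = (int16_t)((d2 + (d2 < 0) + 3) >> 3);   /* op[12..15] */
    }
}

The assembly interleaves both passes: each SMUAD/SMUSD result (A0..D3) is one element of a
row transform, and the column combines and stores follow immediately, grouped by output
column (op[0,4,8,12], op[3,7,11,15], op[1,5,9,13], op[2,6,10,14]) to keep all twelve live
values in registers.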