Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie, where it counts. Some reservations
remain about how to convince FindCookie callers to test the condition and
pass a nullptr when double-key logic is disabled.
/*
 * Copyright 2012 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBlitRow.h"
#include "SkColorPriv.h"
#include "SkDither.h"
#include "SkMathPriv.h"
#include "SkUtils.h"
#include "SkUtilsArm.h"

#include "SkCachePreload_arm.h"

// Define USE_NEON_CODE to indicate that we need to build NEON routines
#define USE_NEON_CODE (!SK_ARM_NEON_IS_NONE)

// Define USE_ARM_CODE to indicate that we need to build ARM routines
#define USE_ARM_CODE (!SK_ARM_NEON_IS_ALWAYS)

#if USE_NEON_CODE
#include "SkBlitRow_opts_arm_neon.h"
#endif
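
// SkUtilsArm.h defines three build modes: NEON compiled out
// (SK_ARM_NEON_IS_NONE), assumed present (SK_ARM_NEON_IS_ALWAYS), or probed
// at runtime (SK_ARM_NEON_IS_DYNAMIC). In the dynamic mode both macros above
// are true, so both the ARM and NEON routine tables get built and one is
// selected at runtime.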

#if USE_ARM_CODE

static void S32A_D565_Opaque(uint16_t* SK_RESTRICT dst,
                             const SkPMColor* SK_RESTRICT src, int count,
                             U8CPU alpha, int /*x*/, int /*y*/) {
    SkASSERT(255 == alpha);

    asm volatile (
        "1:                                \n\t" /* <opaque loop> */
        "ldr     r3, [%[src]], #4          \n\t" /* r3 = *src++ */
        "cmp     r3, #0xff000000           \n\t" /* check source alpha */
        "blo     2f                        \n\t" /* alpha < 0xff -> blend path */
        "and     r4, r3, #0x0000f8         \n\t" /* top 5 bits of byte 0 */
        "and     r5, r3, #0x00fc00         \n\t" /* top 6 bits of byte 1 */
        "and     r6, r3, #0xf80000         \n\t" /* top 5 bits of byte 2 */
#ifdef SK_ARM_HAS_EDSP
        "pld     [r1, #32]                 \n\t"
#endif
        "lsl     r3, r4, #8                \n\t" /* pack the three fields */
        "orr     r3, r3, r5, lsr #5        \n\t" /* into a 565 pixel in r3 */
        "orr     r3, r3, r6, lsr #19       \n\t"
        "subs    %[count], %[count], #1    \n\t"
        "strh    r3, [%[dst]], #2          \n\t" /* *dst++ = r3 */
        "bne     1b                        \n\t"
        "b       4f                        \n\t"

        "2:                                \n\t" /* <blend path> */
        "lsrs    r7, r3, #24               \n\t" /* r7 = source alpha */
        "beq     3f                        \n\t" /* alpha == 0 -> skip pixel */
        "ldrh    r4, [%[dst]]              \n\t" /* r4 = current dst 565 pixel */
        "rsb     r7, r7, #255              \n\t" /* r7 = 255 - alpha = dst scale */
        "and     r6, r4, #0x001f           \n\t" /* low 5-bit field */
#if SK_ARM_ARCH <= 6
        "lsl     r5, r4, #21               \n\t" /* middle 6-bit field */
        "lsr     r5, r5, #26               \n\t"
#else
        "ubfx    r5, r4, #5, #6            \n\t" /* middle 6-bit field */
#endif
#ifdef SK_ARM_HAS_EDSP
        "pld     [r0, #16]                 \n\t"
#endif
        "lsr     r4, r4, #11               \n\t" /* high 5-bit field */
#ifdef SK_ARM_HAS_EDSP
        "smulbb  r6, r6, r7                \n\t" /* scale each dst field */
        "smulbb  r5, r5, r7                \n\t" /* by 255 - alpha */
        "smulbb  r4, r4, r7                \n\t"
#else
        "mul     r6, r6, r7                \n\t" /* scale each dst field */
        "mul     r5, r5, r7                \n\t" /* by 255 - alpha */
        "mul     r4, r4, r7                \n\t"
#endif
#if SK_ARM_ARCH >= 6
        "uxtb    r7, r3, ROR #16           \n\t" /* r7 = source byte 2 */
        "uxtb    ip, r3, ROR #8            \n\t" /* ip = source byte 1 */
#else
        "mov     ip, #0xff                 \n\t"
        "and     r7, ip, r3, ROR #16       \n\t" /* r7 = source byte 2 */
        "and     ip, ip, r3, ROR #8        \n\t" /* ip = source byte 1 */
#endif
        "and     r3, r3, #0xff             \n\t" /* r3 = source byte 0 */
        "add     r6, r6, #16               \n\t" /* round the scaled fields */
        "add     r5, r5, #32               \n\t"
        "add     r4, r4, #16               \n\t"
        "add     r6, r6, r6, lsr #5        \n\t"
        "add     r5, r5, r5, lsr #6        \n\t"
        "add     r4, r4, r4, lsr #5        \n\t"
        "add     r6, r7, r6, lsr #5        \n\t" /* add the source channels */
        "add     r5, ip, r5, lsr #6        \n\t"
        "add     r4, r3, r4, lsr #5        \n\t"
        "lsr     r6, r6, #3                \n\t" /* repack into 565 */
        "and     r5, r5, #0xfc             \n\t"
        "and     r4, r4, #0xf8             \n\t"
        "orr     r6, r6, r5, lsl #3        \n\t"
        "orr     r4, r6, r4, lsl #8        \n\t"
        "strh    r4, [%[dst]], #2          \n\t" /* *dst++ = blended pixel */
#ifdef SK_ARM_HAS_EDSP
        "pld     [r1, #32]                 \n\t"
#endif
        "subs    %[count], %[count], #1    \n\t"
        "bne     1b                        \n\t"
        "b       4f                        \n\t"

        "3:                                \n\t" /* <transparent: skip pixel> */
        "subs    %[count], %[count], #1    \n\t"
        "add     %[dst], %[dst], #2        \n\t"
        "bne     1b                        \n\t"
        "4:                                \n\t" /* <exit> */
        : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count)
        :
        : "memory", "cc", "r3", "r4", "r5", "r6", "r7", "ip"
    );
}
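
// Documentation-only sketch (kept out of the build): roughly the per-pixel
// logic of the asm above in plain C++, modulo the asm's rounding details.
// SkSrcOver32To16() is the SkColorPriv.h helper for the translucent case.
#if 0
static void S32A_D565_Opaque_ref(uint16_t* SK_RESTRICT dst,
                                 const SkPMColor* SK_RESTRICT src, int count) {
    while (count-- > 0) {
        SkPMColor c = *src++;
        if (c != 0) {               // fully transparent pixels are skipped
            *dst = SkSrcOver32To16(c, *dst);
        }
        dst += 1;
    }
}
#endif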

static void S32A_Opaque_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
                                      const SkPMColor* SK_RESTRICT src,
                                      int count, U8CPU alpha) {

    SkASSERT(255 == alpha);

    asm volatile (
        "cmp    %[count], #0               \n\t" /* comparing count with 0 */
        "beq    3f                         \n\t" /* if zero exit */

        "mov    ip, #0xff                  \n\t" /* load the 0xff mask in ip */
        "orr    ip, ip, ip, lsl #16        \n\t" /* convert it to 0xff00ff in ip */

        "cmp    %[count], #2               \n\t" /* compare count with 2 */
        "blt    2f                         \n\t" /* if less than 2 -> single loop */

        /* Double Loop */
        "1:                                \n\t" /* <double loop> */
        "ldm    %[src]!, {r5,r6}           \n\t" /* load the src(s) at r5-r6 */
        "ldm    %[dst], {r7,r8}            \n\t" /* loading dst(s) into r7-r8 */
        "lsr    r4, r5, #24                \n\t" /* extracting the alpha from source and storing it to r4 */

        /* ----------- */
        "and    r9, ip, r7                 \n\t" /* r9 = br masked by ip */
        "rsb    r4, r4, #256               \n\t" /* subtracting the alpha from 256 -> r4=scale */
        "and    r10, ip, r7, lsr #8        \n\t" /* r10 = ag masked by ip */

        "mul    r9, r9, r4                 \n\t" /* br = br * scale */
        "mul    r10, r10, r4               \n\t" /* ag = ag * scale */
        "and    r9, ip, r9, lsr #8         \n\t" /* lsr br by 8 and mask it */

        "and    r10, r10, ip, lsl #8       \n\t" /* mask ag with reverse mask */
        "lsr    r4, r6, #24                \n\t" /* extracting the alpha from source and storing it to r4 */
        "orr    r7, r9, r10                \n\t" /* br | ag */

        "add    r7, r5, r7                 \n\t" /* dst = src + calc dest(r7) */
        "rsb    r4, r4, #256               \n\t" /* subtracting the alpha from 256 -> r4=scale */

        /* ----------- */
        "and    r9, ip, r8                 \n\t" /* r9 = br masked by ip */

        "and    r10, ip, r8, lsr #8        \n\t" /* r10 = ag masked by ip */
        "mul    r9, r9, r4                 \n\t" /* br = br * scale */
        "sub    %[count], %[count], #2     \n\t"
        "mul    r10, r10, r4               \n\t" /* ag = ag * scale */

        "and    r9, ip, r9, lsr #8         \n\t" /* lsr br by 8 and mask it */
        "and    r10, r10, ip, lsl #8       \n\t" /* mask ag with reverse mask */
        "cmp    %[count], #1               \n\t" /* comparing count with 1 */
        "orr    r8, r9, r10                \n\t" /* br | ag */

        "add    r8, r6, r8                 \n\t" /* dst = src + calc dest(r8) */

        /* ----------------- */
        "stm    %[dst]!, {r7,r8}           \n\t" /* store r7,r8 to *dst, post-increment dst by two pixels */
        /* ----------------- */

        "bgt    1b                         \n\t" /* if greater than 1 -> reloop */
        "blt    3f                         \n\t" /* if less than 1 -> exit */

        /* Single Loop */
        "2:                                \n\t" /* <single loop> */
        "ldr    r5, [%[src]], #4           \n\t" /* load *src into r5, post-increment src */
        "ldr    r7, [%[dst]]               \n\t" /* loading dst into r7 */
        "lsr    r4, r5, #24                \n\t" /* extracting the alpha from source and storing it to r4 */

        /* ----------- */
        "and    r9, ip, r7                 \n\t" /* r9 = br masked by ip */
        "rsb    r4, r4, #256               \n\t" /* subtracting the alpha from 256 -> r4=scale */

        "and    r10, ip, r7, lsr #8        \n\t" /* r10 = ag masked by ip */
        "mul    r9, r9, r4                 \n\t" /* br = br * scale */
        "mul    r10, r10, r4               \n\t" /* ag = ag * scale */
        "and    r9, ip, r9, lsr #8         \n\t" /* lsr br by 8 and mask it */

        "and    r10, r10, ip, lsl #8       \n\t" /* mask ag */
        "orr    r7, r9, r10                \n\t" /* br | ag */

        "add    r7, r5, r7                 \n\t" /* *dst = src + calc dest(r7) */

        /* ----------------- */
        "str    r7, [%[dst]], #4           \n\t" /* *dst = r7, post-increment dst by one pixel */
        /* ----------------- */

        "3:                                \n\t" /* <exit> */
        : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count)
        :
        : "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "ip", "memory"
    );
}
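
// Documentation-only sketch of the math above: each dst pixel is scaled by
// (256 - srcA) on its two 16-bit channel pairs (the 0x00ff00ff mask trick,
// SkAlphaMulQ() from SkColorPriv.h) and the premultiplied source is added.
#if 0
static void S32A_Opaque_BlitRow32_ref(SkPMColor* SK_RESTRICT dst,
                                      const SkPMColor* SK_RESTRICT src,
                                      int count) {
    for (int i = 0; i < count; i++) {
        SkPMColor s = src[i];
        unsigned dst_scale = 256 - SkGetPackedA32(s);
        dst[i] = s + SkAlphaMulQ(dst[i], dst_scale);
    }
}
#endif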

/*
 * ARM asm version of S32A_Blend_BlitRow32
 */
void S32A_Blend_BlitRow32_arm(SkPMColor* SK_RESTRICT dst,
                              const SkPMColor* SK_RESTRICT src,
                              int count, U8CPU alpha) {
    asm volatile (
        "cmp    %[count], #0               \n\t" /* comparing count with 0 */
        "beq    3f                         \n\t" /* if zero exit */

        "mov    r12, #0xff                 \n\t" /* load the 0xff mask in r12 */
        "orr    r12, r12, r12, lsl #16     \n\t" /* convert it to 0xff00ff in r12 */

        /* src1,2_scale */
        "add    %[alpha], %[alpha], #1     \n\t" /* loading %[alpha]=src_scale=alpha+1 */

        "cmp    %[count], #2               \n\t" /* comparing count with 2 */
        "blt    2f                         \n\t" /* if less than 2 -> single loop */

        /* Double Loop */
        "1:                                \n\t" /* <double loop> */
        "ldm    %[src]!, {r5, r6}          \n\t" /* loading src pointers into r5 and r6 */
        "ldm    %[dst], {r7, r8}           \n\t" /* loading dst pointers into r7 and r8 */

        /* dst1_scale and dst2_scale */
        "lsr    r9, r5, #24                \n\t" /* src >> 24 */
        "lsr    r10, r6, #24               \n\t" /* src >> 24 */
#ifdef SK_ARM_HAS_EDSP
        "smulbb r9, r9, %[alpha]           \n\t" /* r9 = SkMulS16 r9 with src_scale */
        "smulbb r10, r10, %[alpha]         \n\t" /* r10 = SkMulS16 r10 with src_scale */
#else
        "mul    r9, r9, %[alpha]           \n\t" /* r9 = SkMulS16 r9 with src_scale */
        "mul    r10, r10, %[alpha]         \n\t" /* r10 = SkMulS16 r10 with src_scale */
#endif
        "lsr    r9, r9, #8                 \n\t" /* r9 >> 8 */
        "lsr    r10, r10, #8               \n\t" /* r10 >> 8 */
        "rsb    r9, r9, #256               \n\t" /* dst1_scale = r9 = 255 - r9 + 1 */
        "rsb    r10, r10, #256             \n\t" /* dst2_scale = r10 = 255 - r10 + 1 */

        /* ---------------------- */

        /* src1, src1_scale */
        "and    r11, r12, r5, lsr #8       \n\t" /* ag = r11 = r5 masked by r12 lsr by #8 */
        "and    r4, r12, r5                \n\t" /* rb = r4 = r5 masked by r12 */
        "mul    r11, r11, %[alpha]         \n\t" /* ag = r11 times src_scale */
        "mul    r4, r4, %[alpha]           \n\t" /* rb = r4 times src_scale */
        "and    r11, r11, r12, lsl #8      \n\t" /* ag masked by reverse mask (r12) */
        "and    r4, r12, r4, lsr #8        \n\t" /* rb masked by mask (r12) */
        "orr    r5, r11, r4                \n\t" /* r5 = (src1, src_scale) */

        /* dst1, dst1_scale */
        "and    r11, r12, r7, lsr #8       \n\t" /* ag = r11 = r7 masked by r12 lsr by #8 */
        "and    r4, r12, r7                \n\t" /* rb = r4 = r7 masked by r12 */
        "mul    r11, r11, r9               \n\t" /* ag = r11 times dst_scale (r9) */
        "mul    r4, r4, r9                 \n\t" /* rb = r4 times dst_scale (r9) */
        "and    r11, r11, r12, lsl #8      \n\t" /* ag masked by reverse mask (r12) */
        "and    r4, r12, r4, lsr #8        \n\t" /* rb masked by mask (r12) */
        "orr    r9, r11, r4                \n\t" /* r9 = (dst1, dst_scale) */

        /* ---------------------- */
        "add    r9, r5, r9                 \n\t" /* *dst = src plus dst both scaled */
        /* ---------------------- */

        /* ====================== */

        /* src2, src2_scale */
        "and    r11, r12, r6, lsr #8       \n\t" /* ag = r11 = r6 masked by r12 lsr by #8 */
        "and    r4, r12, r6                \n\t" /* rb = r4 = r6 masked by r12 */
        "mul    r11, r11, %[alpha]         \n\t" /* ag = r11 times src_scale */
        "mul    r4, r4, %[alpha]           \n\t" /* rb = r4 times src_scale */
        "and    r11, r11, r12, lsl #8      \n\t" /* ag masked by reverse mask (r12) */
        "and    r4, r12, r4, lsr #8        \n\t" /* rb masked by mask (r12) */
        "orr    r6, r11, r4                \n\t" /* r6 = (src2, src_scale) */

        /* dst2, dst2_scale */
        "and    r11, r12, r8, lsr #8       \n\t" /* ag = r11 = r8 masked by r12 lsr by #8 */
        "and    r4, r12, r8                \n\t" /* rb = r4 = r8 masked by r12 */
        "mul    r11, r11, r10              \n\t" /* ag = r11 times dst_scale (r10) */
        "mul    r4, r4, r10                \n\t" /* rb = r4 times dst_scale (r10) */
        "and    r11, r11, r12, lsl #8      \n\t" /* ag masked by reverse mask (r12) */
        "and    r4, r12, r4, lsr #8        \n\t" /* rb masked by mask (r12) */
        "orr    r10, r11, r4               \n\t" /* r10 = (dst2, dst_scale) */

        "sub    %[count], %[count], #2     \n\t" /* decrease count by 2 */
        /* ---------------------- */
        "add    r10, r6, r10               \n\t" /* *dst = src plus dst both scaled */
        /* ---------------------- */
        "cmp    %[count], #1               \n\t" /* compare count with 1 */
        /* ----------------- */
        "stm    %[dst]!, {r9, r10}         \n\t" /* store r9,r10 to *dst, post-increment dst by two pixels */
        /* ----------------- */

        "bgt    1b                         \n\t" /* if %[count] greater than 1 reloop */
        "blt    3f                         \n\t" /* if %[count] less than 1 exit */
                                                 /* else get into the single loop */
        /* Single Loop */
        "2:                                \n\t" /* <single loop> */
        "ldr    r5, [%[src]], #4           \n\t" /* load *src into r5, post-increment src */
        "ldr    r7, [%[dst]]               \n\t" /* load *dst into r7 */

        "lsr    r6, r5, #24                \n\t" /* src >> 24 */
        "and    r8, r12, r5, lsr #8        \n\t" /* ag = r8 = r5 masked by r12 lsr by #8 */
#ifdef SK_ARM_HAS_EDSP
        "smulbb r6, r6, %[alpha]           \n\t" /* r6 = SkMulS16 with src_scale */
#else
        "mul    r6, r6, %[alpha]           \n\t" /* r6 = SkMulS16 with src_scale */
#endif
        "and    r9, r12, r5                \n\t" /* rb = r9 = r5 masked by r12 */
        "lsr    r6, r6, #8                 \n\t" /* r6 >> 8 */
        "mul    r8, r8, %[alpha]           \n\t" /* ag = r8 times scale */
        "rsb    r6, r6, #256               \n\t" /* r6 = 255 - r6 + 1 */

        /* src, src_scale */
        "mul    r9, r9, %[alpha]           \n\t" /* rb = r9 times scale */
        "and    r8, r8, r12, lsl #8        \n\t" /* ag masked by reverse mask (r12) */
        "and    r9, r12, r9, lsr #8        \n\t" /* rb masked by mask (r12) */
        "orr    r10, r8, r9                \n\t" /* r10 = (src, src_scale) */

        /* dst, dst_scale */
        "and    r8, r12, r7, lsr #8        \n\t" /* ag = r8 = r7 masked by r12 lsr by #8 */
        "and    r9, r12, r7                \n\t" /* rb = r9 = r7 masked by r12 */
        "mul    r8, r8, r6                 \n\t" /* ag = r8 times scale (r6) */
        "mul    r9, r9, r6                 \n\t" /* rb = r9 times scale (r6) */
        "and    r8, r8, r12, lsl #8        \n\t" /* ag masked by reverse mask (r12) */
        "and    r9, r12, r9, lsr #8        \n\t" /* rb masked by mask (r12) */
        "orr    r7, r8, r9                 \n\t" /* r7 = (dst, dst_scale) */

        "add    r10, r7, r10               \n\t" /* *dst = src plus dst both scaled */

        /* ----------------- */
        "str    r10, [%[dst]], #4          \n\t" /* *dst = r10, post-increment dst by one pixel */
        /* ----------------- */

        "3:                                \n\t" /* <exit> */
        : [dst] "+r" (dst), [src] "+r" (src), [count] "+r" (count), [alpha] "+r" (alpha)
        :
        : "cc", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "memory"
    );

}
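
// Documentation-only sketch of the blend math above: src_scale = alpha + 1,
// and per pixel dst_scale = 256 - ((srcA * src_scale) >> 8); both src and
// dst are channel-scaled with the same 0x00ff00ff trick and summed.
#if 0
static void S32A_Blend_BlitRow32_ref(SkPMColor* SK_RESTRICT dst,
                                     const SkPMColor* SK_RESTRICT src,
                                     int count, U8CPU alpha) {
    unsigned src_scale = SkAlpha255To256(alpha);    // alpha + 1
    for (int i = 0; i < count; i++) {
        SkPMColor s = src[i];
        unsigned dst_scale = 256 - ((SkGetPackedA32(s) * src_scale) >> 8);
        dst[i] = SkAlphaMulQ(s, src_scale) + SkAlphaMulQ(dst[i], dst_scale);
    }
}
#endif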

///////////////////////////////////////////////////////////////////////////////

static const SkBlitRow::Proc sk_blitrow_platform_565_procs_arm[] = {
    // no dither
    // NOTE: For the functions below, we don't have a special version
    // that assumes that each source pixel is opaque. But our S32A is
    // still faster than the default, so use it.
    S32A_D565_Opaque,   // S32_D565_Opaque
    NULL,               // S32_D565_Blend
    S32A_D565_Opaque,   // S32A_D565_Opaque
    NULL,               // S32A_D565_Blend

    // dither
    NULL,               // S32_D565_Opaque_Dither
    NULL,               // S32_D565_Blend_Dither
    NULL,               // S32A_D565_Opaque_Dither
    NULL,               // S32A_D565_Blend_Dither
};

static const SkBlitRow::Proc32 sk_blitrow_platform_32_procs_arm[] = {
    NULL,                       // S32_Opaque,
    NULL,                       // S32_Blend,
    S32A_Opaque_BlitRow32_arm,  // S32A_Opaque,
    S32A_Blend_BlitRow32_arm    // S32A_Blend
};

#endif // USE_ARM_CODE

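// SK_ARM_NEON_WRAP(x) (from SkUtilsArm.h) resolves to x, to x ## _neon, or
// to a runtime sk_cpu_arm_has_neon() check between the two, depending on the
// NEON build mode. NULL entries returned from these tables tell SkBlitRow to
// fall back to its portable default implementation.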
SkBlitRow::Proc SkBlitRow::PlatformProcs565(unsigned flags) {
    return SK_ARM_NEON_WRAP(sk_blitrow_platform_565_procs_arm)[flags];
}

SkBlitRow::Proc32 SkBlitRow::PlatformProcs32(unsigned flags) {
    return SK_ARM_NEON_WRAP(sk_blitrow_platform_32_procs_arm)[flags];
}

///////////////////////////////////////////////////////////////////////////////

#define Color32_arm NULL
SkBlitRow::ColorProc SkBlitRow::PlatformColorProc() {
    return SK_ARM_NEON_WRAP(Color32_arm);
}

SkBlitRow::ColorRectProc PlatformColorRectProcFactory() {
    return NULL;
}